From b7a3a5d941158f389cc6151281d04f3b9b32a332 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 21 Sep 2023 12:21:45 -0700 Subject: [PATCH 01/13] Add more duration info to SG request stats log (#5573) * include get_all, merge and total latency to store gateway series request log Signed-off-by: Ben Ye * update thanos and add metrics to cortex Signed-off-by: Ben Ye * update changelog Signed-off-by: Ben Ye * update again Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- CHANGELOG.md | 1 + go.mod | 8 +- go.sum | 16 +- pkg/querier/blocks_store_queryable.go | 12 +- pkg/storegateway/bucket_store_metrics.go | 20 +- pkg/storegateway/bucket_store_metrics_test.go | 61 ++- vendor/github.com/dgryski/go-metro/LICENSE | 24 -- vendor/github.com/dgryski/go-metro/README | 6 - vendor/github.com/dgryski/go-metro/metro.py | 199 ---------- .../github.com/dgryski/go-metro/metro128.go | 94 ----- vendor/github.com/dgryski/go-metro/metro64.go | 88 ----- .../github.com/dgryski/go-metro/metro_amd64.s | 372 ------------------ .../github.com/dgryski/go-metro/metro_stub.go | 10 - .../seiflotfy/cuckoofilter/.gitignore | 26 -- .../github.com/seiflotfy/cuckoofilter/LICENSE | 22 -- .../seiflotfy/cuckoofilter/README.md | 62 --- .../seiflotfy/cuckoofilter/bucket.go | 45 --- .../seiflotfy/cuckoofilter/cuckoofilter.go | 165 -------- .../github.com/seiflotfy/cuckoofilter/doc.go | 35 -- .../cuckoofilter/scalable_cuckoofilter.go | 170 -------- .../github.com/seiflotfy/cuckoofilter/util.go | 52 --- .../thanos-io/objstore/CHANGELOG.md | 6 + .../github.com/thanos-io/objstore/README.md | 3 + .../github.com/thanos-io/objstore/objstore.go | 30 +- .../objstore/providers/azure/azure.go | 27 +- .../objstore/providers/azure/helpers.go | 10 + .../thanos-io/objstore/providers/gcs/gcs.go | 10 +- .../thanos-io/objstore/providers/s3/s3.go | 9 +- .../thanos/pkg/cacheutil/async_op.go | 6 +- .../thanos/pkg/cacheutil/memcached_client.go | 2 +- .../pkg/compact/downsample/downsample.go | 11 +- .../thanos-io/thanos/pkg/store/bucket.go | 201 +++++----- .../thanos-io/thanos/pkg/store/flushable.go | 21 +- .../thanos/pkg/store/hintspb/custom.go | 3 + .../thanos/pkg/store/hintspb/hints.pb.go | 230 ++++++++--- .../thanos/pkg/store/hintspb/hints.proto | 3 + .../thanos-io/thanos/pkg/store/prometheus.go | 7 +- .../thanos-io/thanos/pkg/store/proxy_heap.go | 60 +-- .../thanos-io/thanos/pkg/store/tsdb.go | 43 +- .../thanos-io/thanos/pkg/stringset/set.go | 101 ----- vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_x86.go | 7 + vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + vendor/golang.org/x/sys/unix/syscall_linux.go | 23 ++ vendor/golang.org/x/sys/unix/syscall_unix.go | 3 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 17 + .../golang.org/x/sys/unix/zsyscall_linux.go | 20 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 15 + .../x/sys/windows/syscall_windows.go | 11 +- .../x/sys/windows/zsyscall_windows.go | 26 +- vendor/modules.txt | 13 +- 51 files changed, 594 insertions(+), 1818 deletions(-) delete mode 100644 vendor/github.com/dgryski/go-metro/LICENSE delete mode 100644 vendor/github.com/dgryski/go-metro/README delete mode 100644 vendor/github.com/dgryski/go-metro/metro.py delete mode 100644 vendor/github.com/dgryski/go-metro/metro128.go delete mode 100644 vendor/github.com/dgryski/go-metro/metro64.go delete mode 100644 vendor/github.com/dgryski/go-metro/metro_amd64.s delete mode 100644 vendor/github.com/dgryski/go-metro/metro_stub.go delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/.gitignore delete mode 100644 
vendor/github.com/seiflotfy/cuckoofilter/LICENSE delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/README.md delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/bucket.go delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/doc.go delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go delete mode 100644 vendor/github.com/seiflotfy/cuckoofilter/util.go delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/stringset/set.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b827b22a3..cd6df786cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ * [ENHANCEMENT] Querier: Retry store gateway client connection closing gRPC error. #5558 * [ENHANCEMENT] QueryFrontend: Add generic retry for all APIs. #5561. * [ENHANCEMENT] QueryFrontend: Add metric for number of series requests. #5373 +* [ENHANCEMENT] Store Gateway: Add histogram metrics for total time spent fetching series and chunks per request. #5573 * [BUGFIX] Ruler: Validate if rule group can be safely converted back to rule group yaml from protobuf message #5265 * [BUGFIX] Querier: Convert gRPC `ResourceExhausted` status code from store gateway to 422 limit error. #5286 * [BUGFIX] Alertmanager: Route web-ui requests to the alertmanager distributor when sharding is enabled. #5293 diff --git a/go.mod b/go.mod index 2d3bd3850a..2043387dfb 100644 --- a/go.mod +++ b/go.mod @@ -51,9 +51,9 @@ require ( github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.5 github.com/stretchr/testify v1.8.4 - github.com/thanos-io/objstore v0.0.0-20230816175749-20395bffdf26 + github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e - github.com/thanos-io/thanos v0.32.3-0.20230911095949-f6a39507b6bd + github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.9 @@ -117,7 +117,6 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect @@ -190,7 +189,6 @@ require ( github.com/rs/cors v1.9.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect @@ -221,7 +219,7 @@ require ( golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/tools v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index a89bbfe090..c94e572657 100644 --- a/go.sum +++ b/go.sum @@ -549,8 +549,6 @@ github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E= -github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw= @@ -1158,8 +1156,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb h1:XfLJSPIOUX+osiMraVgIrMR27uMXnRJWGm1+GL8/63U= -github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb/go.mod h1:bR6DqgcAl1zTcOX8/pE2Qkj9XO00eCNqmKb7lXP8EAg= github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= @@ -1212,12 +1208,12 @@ github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNG github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20230816175749-20395bffdf26 h1:q1lin/af0lw+I3sS79ccHs2CLjFOPc190J9saeQ5qQ4= -github.com/thanos-io/objstore v0.0.0-20230816175749-20395bffdf26/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= +github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed h1:iWQdY3S6DpWjelVvKKSKgS7LeLkhK4VaEnQfphB9ZXA= +github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e h1:kwsFCU8eSkZehbrAN3nXPw5RdMHi/Bok/y8l2C4M+gk= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e/go.mod h1:+T/ZYNCGybT6eTsGGvVtGb63nT1cvUmH6MjqRrcQoKw= -github.com/thanos-io/thanos v0.32.3-0.20230911095949-f6a39507b6bd h1:JAXqwb/nzY7WzijekZrhrL63m988VLyoFUEaKLU15iA= -github.com/thanos-io/thanos v0.32.3-0.20230911095949-f6a39507b6bd/go.mod h1:J81dp4qaOX+GfPmRoYqu/aZXfEBri7+i3TzY2xamthg= +github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 h1:T9Vot+BQao6M6j8F0JQbseAqtniOw1Csz+QHRRRwF48= +github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab 
h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU=
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1607,8 +1603,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go
index 5729a82a17..b3e7f9073c 100644
--- a/pkg/querier/blocks_store_queryable.go
+++ b/pkg/querier/blocks_store_queryable.go
@@ -609,6 +609,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(
 return errors.Wrapf(err, "failed to create series request")
 }
+ begin := time.Now()
 stream, err := c.Series(gCtx, req)
 if err != nil {
 if isRetryableError(err) {
@@ -725,10 +726,10 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(
 "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "),
 "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " "))
- // It is also interesting to look at data downloaded at store gateway even if
- // no series got matched, but to reduce verbosity we are more interested in those
- // matched case. With vertical sharding enabled it is easy to log too much.
- if numSeries > 0 {
+ // Use the number of blocks queried to decide whether to log the query.
+ // This can be verbose, but it is useful for understanding per-request
+ // performance.
+ if seriesQueryStats.BlocksQueried > 0 {
 level.Info(spanLog).Log("msg", "store gateway series request stats",
 "instance", c.RemoteAddress(),
 "queryable_chunk_bytes_fetched", chunkBytes,
@@ -753,6 +754,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(
 "chunks_fetch_count", seriesQueryStats.ChunksFetchCount,
 "chunks_fetched_size_sum", seriesQueryStats.ChunksFetchedSizeSum,
 "data_downloaded_size_sum", seriesQueryStats.DataDownloadedSizeSum,
+ "get_all_duration", seriesQueryStats.GetAllDuration,
+ "merge_duration", seriesQueryStats.MergeDuration,
+ "response_time", time.Since(begin),
 )
 }
diff --git a/pkg/storegateway/bucket_store_metrics.go b/pkg/storegateway/bucket_store_metrics.go
index 3977b85480..cb1cf1152b 100644
--- a/pkg/storegateway/bucket_store_metrics.go
+++ b/pkg/storegateway/bucket_store_metrics.go
@@ -38,9 +38,11 @@ type BucketStoreMetrics struct {
 cachedPostingsOriginalSizeBytes *prometheus.Desc
 cachedPostingsCompressedSizeBytes *prometheus.Desc
- seriesFetchDuration *prometheus.Desc
- postingsFetchDuration *prometheus.Desc
- chunkFetchDuration *prometheus.Desc
+ seriesFetchDuration *prometheus.Desc
+ seriesFetchDurationSum *prometheus.Desc
+ postingsFetchDuration *prometheus.Desc
+ chunkFetchDuration *prometheus.Desc
+ chunkFetchDurationSum *prometheus.Desc
 lazyExpandedPostingsCount *prometheus.Desc
 lazyExpandedPostingSizeBytes *prometheus.Desc
@@ -160,6 +162,10 @@ func NewBucketStoreMetrics() *BucketStoreMetrics {
 "cortex_bucket_store_series_fetch_duration_seconds",
 "Time it takes to fetch series to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses.",
 nil, nil),
+ seriesFetchDurationSum: prometheus.NewDesc(
+ "cortex_bucket_store_series_fetch_duration_sum_seconds",
+ "The total time it takes to fetch series to respond to a request sent to a store gateway across all series batches. It includes both the time to fetch it from the cache and from storage in case of cache misses.",
+ nil, nil),
 postingsFetchDuration: prometheus.NewDesc(
 "cortex_bucket_store_postings_fetch_duration_seconds",
 "Time it takes to fetch postings to respond a request sent to store-gateway.
It includes both the time to fetch it from cache and from storage in case of cache misses.",
@@ -168,6 +174,10 @@ func NewBucketStoreMetrics() *BucketStoreMetrics {
 "cortex_bucket_store_chunks_fetch_duration_seconds",
 "The total time spent fetching chunks within a single request a store gateway.",
 nil, nil),
+ chunkFetchDurationSum: prometheus.NewDesc(
+ "cortex_bucket_store_chunks_fetch_duration_sum_seconds",
+ "The total absolute time spent fetching chunks within a single request for one block.",
+ nil, nil),
 indexHeaderLazyLoadCount: prometheus.NewDesc(
 "cortex_bucket_store_indexheader_lazy_load_total",
@@ -241,8 +251,10 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) {
 out <- m.cachedPostingsCompressedSizeBytes
 out <- m.seriesFetchDuration
+ out <- m.seriesFetchDurationSum
 out <- m.postingsFetchDuration
 out <- m.chunkFetchDuration
+ out <- m.chunkFetchDurationSum
 out <- m.indexHeaderLazyLoadCount
 out <- m.indexHeaderLazyLoadFailedCount
@@ -288,8 +300,10 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) {
 data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressedSizeBytes, "thanos_bucket_store_cached_postings_compressed_size_bytes_total")
 data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_series_fetch_duration_seconds")
+ data.SendSumOfHistograms(out, m.seriesFetchDurationSum, "thanos_bucket_store_series_fetch_duration_sum_seconds")
 data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_postings_fetch_duration_seconds")
 data.SendSumOfHistograms(out, m.chunkFetchDuration, "thanos_bucket_store_chunks_fetch_duration_seconds")
+ data.SendSumOfHistograms(out, m.chunkFetchDurationSum, "thanos_bucket_store_chunks_fetch_duration_sum_seconds")
 data.SendSumOfCounters(out, m.indexHeaderLazyLoadCount, "thanos_bucket_store_indexheader_lazy_load_total")
 data.SendSumOfCounters(out, m.indexHeaderLazyLoadFailedCount, "thanos_bucket_store_indexheader_lazy_load_failed_total")
diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go
index 33061ca25c..37bccc1d57 100644
--- a/pkg/storegateway/bucket_store_metrics_test.go
+++ b/pkg/storegateway/bucket_store_metrics_test.go
@@ -290,6 +290,25 @@ func TestBucketStoreMetrics(t *testing.T) {
 cortex_bucket_store_series_data_touched_sum{data_type="touched-c"} 180152
 cortex_bucket_store_series_data_touched_count{data_type="touched-c"} 3
+ # HELP cortex_bucket_store_series_fetch_duration_sum_seconds The total time it takes to fetch series to respond to a request sent to a store gateway across all series batches. It includes both the time to fetch it from the cache and from storage in case of cache misses.
+ # TYPE cortex_bucket_store_series_fetch_duration_sum_seconds histogram + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="0.001"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="0.3"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="0.6"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="1"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="3"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="6"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="9"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="20"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="30"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="60"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="90"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="120"} 0 + cortex_bucket_store_series_fetch_duration_sum_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_series_fetch_duration_sum_seconds_sum 1.306102e+06 + cortex_bucket_store_series_fetch_duration_sum_seconds_count 3 # HELP cortex_bucket_store_series_get_all_duration_seconds Time it takes until all per-block prepares and preloads for a query are finished. # TYPE cortex_bucket_store_series_get_all_duration_seconds histogram cortex_bucket_store_series_get_all_duration_seconds_bucket{le="0.001"} 0 @@ -395,7 +414,7 @@ func TestBucketStoreMetrics(t *testing.T) { # HELP cortex_bucket_store_chunk_refetches_total Total number of cases where configured estimated chunk bytes was not enough was to fetch chunks from object store, resulting in refetch. # TYPE cortex_bucket_store_chunk_refetches_total counter - cortex_bucket_store_chunk_refetches_total 0 + cortex_bucket_store_chunk_refetches_total 765646 # HELP cortex_bucket_store_cached_postings_compressed_size_bytes_total Compressed size of postings stored into cache. # TYPE cortex_bucket_store_cached_postings_compressed_size_bytes_total counter @@ -439,6 +458,25 @@ func TestBucketStoreMetrics(t *testing.T) { cortex_bucket_store_chunks_fetch_duration_seconds_bucket{le="+Inf"} 3 cortex_bucket_store_chunks_fetch_duration_seconds_sum 1.328621e+06 cortex_bucket_store_chunks_fetch_duration_seconds_count 3 + # HELP cortex_bucket_store_chunks_fetch_duration_sum_seconds The total absolute time spent fetching chunks within a single request for one block. 
+ # TYPE cortex_bucket_store_chunks_fetch_duration_sum_seconds histogram + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="0.001"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="0.01"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="0.3"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="0.6"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="1"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="3"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="6"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="9"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="20"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="30"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="60"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="90"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="120"} 0 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_sum 1.328621e+06 + cortex_bucket_store_chunks_fetch_duration_sum_seconds_count 3 # HELP cortex_bucket_store_empty_postings_total Total number of empty postings when fetching block series. # TYPE cortex_bucket_store_empty_postings_total counter cortex_bucket_store_empty_postings_total 112595 @@ -604,6 +642,7 @@ func populateMockedBucketStoreMetrics(base float64) *prometheus.Registry { m.chunkSizeBytes.Observe(11 * base) m.seriesRefetches.Add(33 * base) + m.chunkRefetches.Add(34 * base) m.cachedPostingsCompressions.WithLabelValues("encode").Add(50 * base) m.cachedPostingsCompressions.WithLabelValues("decode").Add(51 * base) @@ -618,8 +657,10 @@ func populateMockedBucketStoreMetrics(base float64) *prometheus.Registry { m.cachedPostingsCompressedSizeBytes.Add(57 * base) m.seriesFetchDuration.Observe(58 * base) + m.seriesFetchDurationSum.Observe(58 * base) m.postingsFetchDuration.Observe(59 * base) m.chunkFetchDuration.Observe(59 * base) + m.chunkFetchDurationSum.Observe(59 * base) m.indexHeaderLazyLoadCount.Add(60 * base) m.indexHeaderLazyLoadFailedCount.Add(61 * base) @@ -664,9 +705,11 @@ type mockedBucketStoreMetrics struct { cachedPostingsOriginalSizeBytes prometheus.Counter cachedPostingsCompressedSizeBytes prometheus.Counter - seriesFetchDuration prometheus.Histogram - postingsFetchDuration prometheus.Histogram - chunkFetchDuration prometheus.Histogram + seriesFetchDuration prometheus.Histogram + seriesFetchDurationSum prometheus.Histogram + postingsFetchDuration prometheus.Histogram + chunkFetchDuration prometheus.Histogram + chunkFetchDurationSum prometheus.Histogram indexHeaderLazyLoadCount prometheus.Counter indexHeaderLazyLoadFailedCount prometheus.Counter @@ -801,6 +844,11 @@ func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMe Help: "Time it takes to fetch series from a bucket to respond a query. It also includes the time it takes to cache fetch and store operations.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.seriesFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_series_fetch_duration_sum_seconds", + Help: "The total time it takes to fetch series to respond to a request sent to a store gateway across all series batches. 
It includes both the time to fetch it from the cache and from storage in case of cache misses.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) m.postingsFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "thanos_bucket_store_postings_fetch_duration_seconds", Help: "Time it takes to fetch postings from a bucket to respond a query. It also includes the time it takes to cache fetch and store operations.", @@ -811,6 +859,11 @@ func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMe Help: "The total time spent fetching chunks within a single request a store gateway.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.chunkFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_chunks_fetch_duration_sum_seconds", + Help: "The total absolute time spent fetching chunks within a single request for one block.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) m.indexHeaderLazyLoadCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_indexheader_lazy_load_total", diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE deleted file mode 100644 index 6243b617cf..0000000000 --- a/vendor/github.com/dgryski/go-metro/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -The MIT License (MIT) - -Copyright (c) 2016 Damian Gryski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README deleted file mode 100644 index 5ecebb3853..0000000000 --- a/vendor/github.com/dgryski/go-metro/README +++ /dev/null @@ -1,6 +0,0 @@ -MetroHash - -This package is a mechanical translation of the reference C++ code for -MetroHash, available at https://github.com/jandrewrogers/MetroHash - -I claim no additional copyright over the original implementation. 
diff --git a/vendor/github.com/dgryski/go-metro/metro.py b/vendor/github.com/dgryski/go-metro/metro.py deleted file mode 100644 index 8dd4d26e6a..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro.py +++ /dev/null @@ -1,199 +0,0 @@ -import peachpy.x86_64 - -k0 = 0xD6D018F5 -k1 = 0xA2AA033B -k2 = 0x62992FC1 -k3 = 0x30BC5B29 - -def advance(p,l,c): - ADD(p,c) - SUB(l,c) - -def imul(r,k): - t = GeneralPurposeRegister64() - MOV(t, k) - IMUL(r, t) - -def update32(v, p,idx, k, vadd): - r = GeneralPurposeRegister64() - MOV(r, [p + idx]) - imul(r, k) - ADD(v, r) - ROR(v, 29) - ADD(v, vadd) - -def final32(v, regs, keys): - r = GeneralPurposeRegister64() - MOV(r, v[regs[1]]) - ADD(r, v[regs[2]]) - imul(r, keys[0]) - ADD(r, v[regs[3]]) - ROR(r, 37) - imul(r, keys[1]) - XOR(v[regs[0]], r) - -seed = Argument(uint64_t) -buffer_base = Argument(ptr()) -buffer_len = Argument(int64_t) -buffer_cap = Argument(int64_t) - -def makeHash(name, args): - with Function(name, args, uint64_t) as function: - - reg_ptr = GeneralPurposeRegister64() - reg_ptr_len = GeneralPurposeRegister64() - reg_hash = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_hash, seed) - LOAD.ARGUMENT(reg_ptr, buffer_base) - LOAD.ARGUMENT(reg_ptr_len, buffer_len) - - imul(reg_hash, k0) - r = GeneralPurposeRegister64() - MOV(r, k2*k0) - ADD(reg_hash, r) - - after32 = Label("after32") - - CMP(reg_ptr_len, 32) - JL(after32) - v = [GeneralPurposeRegister64() for _ in range(4)] - for i in range(4): - MOV(v[i], reg_hash) - - with Loop() as loop: - update32(v[0], reg_ptr, 0, k0, v[2]) - update32(v[1], reg_ptr, 8, k1, v[3]) - update32(v[2], reg_ptr, 16, k2, v[0]) - update32(v[3], reg_ptr, 24, k3, v[1]) - - ADD(reg_ptr, 32) - SUB(reg_ptr_len, 32) - CMP(reg_ptr_len, 32) - JGE(loop.begin) - - final32(v, [2,0,3,1], [k0, k1]) - final32(v, [3,1,2,0], [k1, k0]) - final32(v, [0,0,2,3], [k0, k1]) - final32(v, [1,1,3,2], [k1, k0]) - - XOR(v[0], v[1]) - ADD(reg_hash, v[0]) - - LABEL(after32) - - after16 = Label("after16") - CMP(reg_ptr_len, 16) - JL(after16) - - for i in range(2): - MOV(v[i], [reg_ptr]) - imul(v[i], k2) - ADD(v[i], reg_hash) - - advance(reg_ptr, reg_ptr_len, 8) - - ROR(v[i], 29) - imul(v[i], k3) - - r = GeneralPurposeRegister64() - MOV(r, v[0]) - imul(r, k0) - ROR(r, 21) - ADD(r, v[1]) - XOR(v[0], r) - - MOV(r, v[1]) - imul(r, k3) - ROR(r, 21) - ADD(r, v[0]) - XOR(v[1], r) - - ADD(reg_hash, v[1]) - - LABEL(after16) - - after8 = Label("after8") - CMP(reg_ptr_len, 8) - JL(after8) - - r = GeneralPurposeRegister64() - MOV(r, [reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 8) - - MOV(r, reg_hash) - ROR(r, 55) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after8) - - after4 = Label("after4") - CMP(reg_ptr_len, 4) - JL(after4) - - r = GeneralPurposeRegister64() - XOR(r, r) - MOV(r.as_dword, dword[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 4) - - MOV(r, reg_hash) - ROR(r, 26) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after4) - - after2 = Label("after2") - CMP(reg_ptr_len, 2) - JL(after2) - - r = GeneralPurposeRegister64() - XOR(r,r) - MOV(r.as_word, word[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - advance(reg_ptr, reg_ptr_len, 2) - - MOV(r, reg_hash) - ROR(r, 48) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after2) - - after1 = Label("after1") - CMP(reg_ptr_len, 1) - JL(after1) - - r = GeneralPurposeRegister64() - MOVZX(r, byte[reg_ptr]) - imul(r, k3) - ADD(reg_hash, r) - - MOV(r, reg_hash) - ROR(r, 37) - imul(r, k1) - XOR(reg_hash, r) - - LABEL(after1) - - r = GeneralPurposeRegister64() - 
MOV(r, reg_hash) - ROR(r, 28) - XOR(reg_hash, r) - - imul(reg_hash, k0) - - MOV(r, reg_hash) - ROR(r, 29) - XOR(reg_hash, r) - - RETURN(reg_hash) - -makeHash("Hash64", (buffer_base, buffer_len, buffer_cap, seed)) -makeHash("Hash64Str", (buffer_base, buffer_len, seed)) \ No newline at end of file diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go deleted file mode 100644 index e8dd8ddbf5..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro128.go +++ /dev/null @@ -1,94 +0,0 @@ -package metro - -import "encoding/binary" - -func rotate_right(v uint64, k uint) uint64 { - return (v >> k) | (v << (64 - k)) -} - -func Hash128(buffer []byte, seed uint64) (uint64, uint64) { - - const ( - k0 = 0xC83A91E1 - k1 = 0x8648DBDB - k2 = 0x7BDEC03B - k3 = 0x2F5870A5 - ) - - ptr := buffer - - var v [4]uint64 - - v[0] = (seed - k0) * k3 - v[1] = (seed + k1) * k2 - - if len(ptr) >= 32 { - v[2] = (seed + k0) * k2 - v[3] = (seed - k1) * k3 - - for len(ptr) >= 32 { - v[0] += binary.LittleEndian.Uint64(ptr) * k0 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 29) + v[2] - v[1] += binary.LittleEndian.Uint64(ptr) * k1 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 29) + v[3] - v[2] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[2] = rotate_right(v[2], 29) + v[0] - v[3] += binary.LittleEndian.Uint64(ptr) * k3 - ptr = ptr[8:] - v[3] = rotate_right(v[3], 29) + v[1] - } - - v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1 - v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0 - v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1 - v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0 - } - - if len(ptr) >= 16 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * k3 - v[1] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[1] = rotate_right(v[1], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1 - v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0 - } - - if len(ptr) >= 8 { - v[0] += binary.LittleEndian.Uint64(ptr) * k2 - ptr = ptr[8:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1 - } - - if len(ptr) >= 4 { - v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2 - ptr = ptr[4:] - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0 - } - - if len(ptr) >= 2 { - v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2 - ptr = ptr[2:] - v[0] = rotate_right(v[0], 33) * k3 - v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1 - } - - if len(ptr) >= 1 { - v[1] += uint64(ptr[0]) * k2 - v[1] = rotate_right(v[1], 33) * k3 - v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0 - } - - v[0] += rotate_right((v[0]*k0)+v[1], 13) - v[1] += rotate_right((v[1]*k1)+v[0], 37) - v[0] += rotate_right((v[0]*k2)+v[1], 13) - v[1] += rotate_right((v[1]*k3)+v[0], 37) - - return v[0], v[1] -} diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go deleted file mode 100644 index 1c04228a0b..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro64.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build noasm !amd64 gccgo - -package metro - -import ( - "encoding/binary" - "math/bits" -) - -func Hash64(buffer []byte, seed uint64) uint64 { - - const ( - k0 = 0xD6D018F5 - k1 = 0xA2AA033B - k2 = 0x62992FC1 - k3 = 0x30BC5B29 - ) - - ptr := buffer - - hash := (seed + k2) * k0 - - if len(ptr) >= 32 { - v0, v1, v2, v3 := hash, hash, hash, hash - - for len(ptr) >= 32 { - v0 += binary.LittleEndian.Uint64(ptr[:8]) * k0 - v0 = 
bits.RotateLeft64(v0, -29) + v2 - v1 += binary.LittleEndian.Uint64(ptr[8:16]) * k1 - v1 = bits.RotateLeft64(v1, -29) + v3 - v2 += binary.LittleEndian.Uint64(ptr[16:24]) * k2 - v2 = bits.RotateLeft64(v2, -29) + v0 - v3 += binary.LittleEndian.Uint64(ptr[24:32]) * k3 - v3 = bits.RotateLeft64(v3, -29) + v1 - ptr = ptr[32:] - } - - v2 ^= bits.RotateLeft64(((v0+v3)*k0)+v1, -37) * k1 - v3 ^= bits.RotateLeft64(((v1+v2)*k1)+v0, -37) * k0 - v0 ^= bits.RotateLeft64(((v0+v2)*k0)+v3, -37) * k1 - v1 ^= bits.RotateLeft64(((v1+v3)*k1)+v2, -37) * k0 - hash += v0 ^ v1 - } - - if len(ptr) >= 16 { - v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2) - v0 = bits.RotateLeft64(v0, -29) * k3 - v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2) - v1 = bits.RotateLeft64(v1, -29) * k3 - v0 ^= bits.RotateLeft64(v0*k0, -21) + v1 - v1 ^= bits.RotateLeft64(v1*k3, -21) + v0 - hash += v1 - ptr = ptr[16:] - } - - if len(ptr) >= 8 { - hash += binary.LittleEndian.Uint64(ptr[:8]) * k3 - ptr = ptr[8:] - hash ^= bits.RotateLeft64(hash, -55) * k1 - } - - if len(ptr) >= 4 { - hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3 - hash ^= bits.RotateLeft64(hash, -26) * k1 - ptr = ptr[4:] - } - - if len(ptr) >= 2 { - hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3 - ptr = ptr[2:] - hash ^= bits.RotateLeft64(hash, -48) * k1 - } - - if len(ptr) >= 1 { - hash += uint64(ptr[0]) * k3 - hash ^= bits.RotateLeft64(hash, -37) * k1 - } - - hash ^= bits.RotateLeft64(hash, -28) - hash *= k0 - hash ^= bits.RotateLeft64(hash, -29) - - return hash -} - -func Hash64Str(buffer string, seed uint64) uint64 { - return Hash64([]byte(buffer), seed) -} diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s deleted file mode 100644 index 7fa4730a48..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_amd64.s +++ /dev/null @@ -1,372 +0,0 @@ -// +build !noasm -// +build !gccgo - -// Generated by PeachPy 0.2.0 from metro.py - -// func Hash64(buffer_base uintptr, buffer_len int64, buffer_cap int64, seed uint64) uint64 -TEXT ·Hash64(SB),4,$0-40 - MOVQ seed+24(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - 
SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+32(FP) - RET - -// func Hash64Str(buffer_base uintptr, buffer_len int64, seed uint64) uint64 -TEXT ·Hash64Str(SB),4,$0-32 - MOVQ seed+16(FP), AX - MOVQ buffer_base+0(FP), BX - MOVQ buffer_len+8(FP), CX - MOVQ $3603962101, DX - IMULQ DX, AX - MOVQ $5961697176435608501, DX - ADDQ DX, AX - CMPQ CX, $32 - JLT after32 - MOVQ AX, DX - MOVQ AX, DI - MOVQ AX, SI - MOVQ AX, BP -loop_begin: - MOVQ 0(BX), R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ R8, DX - RORQ $29, DX - ADDQ SI, DX - MOVQ 8(BX), R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ R8, DI - RORQ $29, DI - ADDQ BP, DI - MOVQ 16(BX), R8 - MOVQ $1654206401, R9 - IMULQ R9, R8 - ADDQ R8, SI - RORQ $29, SI - ADDQ DX, SI - MOVQ 24(BX), R8 - MOVQ $817650473, R9 - IMULQ R9, R8 - ADDQ R8, BP - RORQ $29, BP - ADDQ DI, BP - ADDQ $32, BX - SUBQ $32, CX - CMPQ CX, $32 - JGE loop_begin - MOVQ DX, R8 - ADDQ BP, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ DI, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, SI - MOVQ DI, R8 - ADDQ SI, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - ADDQ DX, R8 - RORQ $37, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - XORQ R8, BP - MOVQ DX, R8 - ADDQ SI, R8 - MOVQ $3603962101, R9 - IMULQ R9, R8 - ADDQ BP, R8 - RORQ $37, R8 - MOVQ $2729050939, R9 - IMULQ R9, R8 - XORQ R8, DX - MOVQ DI, R8 - ADDQ BP, R8 - MOVQ $2729050939, BP - IMULQ BP, R8 - ADDQ SI, R8 - RORQ $37, R8 - MOVQ $3603962101, SI - IMULQ SI, R8 - XORQ R8, DI - XORQ DI, DX - ADDQ DX, AX -after32: - CMPQ CX, $16 - JLT after16 - MOVQ 0(BX), DX - MOVQ $1654206401, DI - IMULQ DI, DX - ADDQ AX, DX - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DX - MOVQ $817650473, DI - IMULQ DI, DX - MOVQ 0(BX), DI - MOVQ $1654206401, SI - IMULQ SI, DI - ADDQ AX, DI - ADDQ $8, BX - SUBQ $8, CX - RORQ $29, DI - MOVQ $817650473, SI - IMULQ SI, DI - MOVQ DX, SI - MOVQ $3603962101, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DI, SI - XORQ SI, DX - MOVQ DI, SI - MOVQ $817650473, BP - IMULQ BP, SI - RORQ $21, SI - ADDQ DX, SI - XORQ SI, DI - ADDQ DI, AX -after16: - CMPQ CX, $8 - JLT after8 - MOVQ 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $8, BX - SUBQ $8, CX - MOVQ 
AX, DX - RORQ $55, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after8: - CMPQ CX, $4 - JLT after4 - XORQ DX, DX - MOVL 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $4, BX - SUBQ $4, CX - MOVQ AX, DX - RORQ $26, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after4: - CMPQ CX, $2 - JLT after2 - XORQ DX, DX - MOVW 0(BX), DX - MOVQ $817650473, DI - IMULQ DI, DX - ADDQ DX, AX - ADDQ $2, BX - SUBQ $2, CX - MOVQ AX, DX - RORQ $48, DX - MOVQ $2729050939, DI - IMULQ DI, DX - XORQ DX, AX -after2: - CMPQ CX, $1 - JLT after1 - MOVBQZX 0(BX), BX - MOVQ $817650473, CX - IMULQ CX, BX - ADDQ BX, AX - MOVQ AX, BX - RORQ $37, BX - MOVQ $2729050939, CX - IMULQ CX, BX - XORQ BX, AX -after1: - MOVQ AX, BX - RORQ $28, BX - XORQ BX, AX - MOVQ $3603962101, BX - IMULQ BX, AX - MOVQ AX, BX - RORQ $29, BX - XORQ BX, AX - MOVQ AX, ret+24(FP) - RET diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go deleted file mode 100644 index 86ddcb4705..0000000000 --- a/vendor/github.com/dgryski/go-metro/metro_stub.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !noasm,amd64 -// +build !gccgo - -package metro - -//go:generate python -m peachpy.x86_64 metro.py -S -o metro_amd64.s -mabi=goasm -//go:noescape - -func Hash64(buffer []byte, seed uint64) uint64 -func Hash64Str(buffer string, seed uint64) uint64 diff --git a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore b/vendor/github.com/seiflotfy/cuckoofilter/.gitignore deleted file mode 100644 index 11b90db8d9..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea diff --git a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE b/vendor/github.com/seiflotfy/cuckoofilter/LICENSE deleted file mode 100644 index 58393c98c1..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Seif Lotfy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/seiflotfy/cuckoofilter/README.md b/vendor/github.com/seiflotfy/cuckoofilter/README.md deleted file mode 100644 index 2a77fb393f..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Cuckoo Filter - -[![GoDoc](https://godoc.org/github.com/seiflotfy/cuckoofilter?status.svg)](https://godoc.org/github.com/seiflotfy/cuckoofilter) [![CodeHunt.io](https://img.shields.io/badge/vote-codehunt.io-02AFD1.svg)](http://codehunt.io/sub/cuckoo-filter/?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - -Cuckoo filter is a Bloom filter replacement for approximated set-membership queries. While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). - -For details about the algorithm and citations please use this article for now - -["Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -## Implementation details - -The paper cited above leaves several parameters to choose. In this implementation - -1. Every element has 2 possible bucket indices -2. Buckets have a static size of 4 fingerprints -3. Fingerprints have a static size of 8 bits - -1 and 2 are suggested to be the optimum by the authors. The choice of 3 comes down to the desired false positive rate. Given a target false positive rate of `r` and a bucket size `b`, they suggest choosing the fingerprint size `f` using - - f >= log2(2b/r) bits - -With the 8 bit fingerprint size in this repository, you can expect `r ~= 0.03`. -[Other implementations](https://github.com/panmari/cuckoofilter) use 16 bit, which correspond to a false positive rate of `r ~= 0.0001`. 
- -## Example usage: -```go -package main - -import "fmt" -import cuckoo "github.com/seiflotfy/cuckoofilter" - -func main() { - cf := cuckoo.NewFilter(1000) - cf.InsertUnique([]byte("geeky ogre")) - - // Lookup a string (and it a miss) if it exists in the cuckoofilter - cf.Lookup([]byte("hello")) - - count := cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (and it a miss) - cf.Delete([]byte("hello")) - - count = cf.Count() - fmt.Println(count) // count == 1 - - // Delete a string (a hit) - cf.Delete([]byte("geeky ogre")) - - count = cf.Count() - fmt.Println(count) // count == 0 - - cf.Reset() // reset -} -``` - -## Documentation: -["Cuckoo Filter on GoDoc"](http://godoc.org/github.com/seiflotfy/cuckoofilter) diff --git a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go b/vendor/github.com/seiflotfy/cuckoofilter/bucket.go deleted file mode 100644 index 4a83fc5030..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/bucket.go +++ /dev/null @@ -1,45 +0,0 @@ -package cuckoo - -type fingerprint byte - -type bucket [bucketSize]fingerprint - -const ( - nullFp = 0 - bucketSize = 4 -) - -func (b *bucket) insert(fp fingerprint) bool { - for i, tfp := range b { - if tfp == nullFp { - b[i] = fp - return true - } - } - return false -} - -func (b *bucket) delete(fp fingerprint) bool { - for i, tfp := range b { - if tfp == fp { - b[i] = nullFp - return true - } - } - return false -} - -func (b *bucket) getFingerprintIndex(fp fingerprint) int { - for i, tfp := range b { - if tfp == fp { - return i - } - } - return -1 -} - -func (b *bucket) reset() { - for i := range b { - b[i] = nullFp - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go deleted file mode 100644 index ec0d246de2..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/cuckoofilter.go +++ /dev/null @@ -1,165 +0,0 @@ -package cuckoo - -import ( - "fmt" - "math/bits" - "math/rand" -) - -const maxCuckooCount = 500 - -// Filter is a probabilistic counter -type Filter struct { - buckets []bucket - count uint - bucketPow uint -} - -// NewFilter returns a new cuckoofilter with a given capacity. -// A capacity of 1000000 is a normal default, which allocates -// about ~1MB on 64-bit machines. -func NewFilter(capacity uint) *Filter { - capacity = getNextPow2(uint64(capacity)) / bucketSize - if capacity == 0 { - capacity = 1 - } - buckets := make([]bucket, capacity) - return &Filter{ - buckets: buckets, - count: 0, - bucketPow: uint(bits.TrailingZeros(capacity)), - } -} - -// Lookup returns true if data is in the counter -func (cf *Filter) Lookup(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.buckets[i1].getFingerprintIndex(fp) > -1 { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.buckets[i2].getFingerprintIndex(fp) > -1 -} - -// Reset ... 
-func (cf *Filter) Reset() { - for i := range cf.buckets { - cf.buckets[i].reset() - } - cf.count = 0 -} - -func randi(i1, i2 uint) uint { - if rand.Intn(2) == 0 { - return i1 - } - return i2 -} - -// Insert inserts data into the counter and returns true upon success -func (cf *Filter) Insert(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.insert(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - if cf.insert(fp, i2) { - return true - } - return cf.reinsert(fp, randi(i1, i2)) -} - -// InsertUnique inserts data into the counter if not exists and returns true upon success -func (cf *Filter) InsertUnique(data []byte) bool { - if cf.Lookup(data) { - return false - } - return cf.Insert(data) -} - -func (cf *Filter) insert(fp fingerprint, i uint) bool { - if cf.buckets[i].insert(fp) { - cf.count++ - return true - } - return false -} - -func (cf *Filter) reinsert(fp fingerprint, i uint) bool { - for k := 0; k < maxCuckooCount; k++ { - j := rand.Intn(bucketSize) - oldfp := fp - fp = cf.buckets[i][j] - cf.buckets[i][j] = oldfp - - // look in the alternate location for that random element - i = getAltIndex(fp, i, cf.bucketPow) - if cf.insert(fp, i) { - return true - } - } - return false -} - -// Delete data from counter if exists and return if deleted or not -func (cf *Filter) Delete(data []byte) bool { - i1, fp := getIndexAndFingerprint(data, cf.bucketPow) - if cf.delete(fp, i1) { - return true - } - i2 := getAltIndex(fp, i1, cf.bucketPow) - return cf.delete(fp, i2) -} - -func (cf *Filter) delete(fp fingerprint, i uint) bool { - if cf.buckets[i].delete(fp) { - if cf.count > 0 { - cf.count-- - } - return true - } - return false -} - -// Count returns the number of items in the counter -func (cf *Filter) Count() uint { - return cf.count -} - -// Encode returns a byte slice representing a Cuckoofilter -func (cf *Filter) Encode() []byte { - bytes := make([]byte, len(cf.buckets)*bucketSize) - for i, b := range cf.buckets { - for j, f := range b { - index := (i * len(b)) + j - bytes[index] = byte(f) - } - } - return bytes -} - -// Decode returns a Cuckoofilter from a byte slice -func Decode(bytes []byte) (*Filter, error) { - var count uint - if len(bytes)%bucketSize != 0 { - return nil, fmt.Errorf("expected bytes to be multiple of %d, got %d", bucketSize, len(bytes)) - } - if len(bytes) == 0 { - return nil, fmt.Errorf("bytes can not be empty") - } - buckets := make([]bucket, len(bytes)/4) - for i, b := range buckets { - for j := range b { - index := (i * len(b)) + j - if bytes[index] != 0 { - buckets[i][j] = fingerprint(bytes[index]) - count++ - } - } - } - return &Filter{ - buckets: buckets, - count: count, - bucketPow: uint(bits.TrailingZeros(uint(len(buckets)))), - }, nil -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/doc.go b/vendor/github.com/seiflotfy/cuckoofilter/doc.go deleted file mode 100644 index 6f6cbf8281..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or 
substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -/* -Package cuckoo provides a Cuckoo Filter, a Bloom filter replacement for approximated set-membership queries. - -While Bloom filters are well-known space-efficient data structures to serve queries like "if item x is in a set?", they do not support deletion. Their variances to enable deletion (like counting Bloom filters) usually require much more space. - -Cuckoo filters provide the flexibility to add and remove items dynamically. A cuckoo filter is based on cuckoo hashing (and therefore named as cuckoo filter). It is essentially a cuckoo hash table storing each key's fingerprint. Cuckoo hash tables can be highly compact, thus a cuckoo filter could use less space than conventional Bloom filters, for applications that require low false positive rates (< 3%). - -For details about the algorithm and citations please use this article: - -"Cuckoo Filter: Better Than Bloom" by Bin Fan, Dave Andersen and Michael Kaminsky -(https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) - -Note: -This implementation uses a a static bucket size of 4 fingerprints and a fingerprint size of 1 byte based on my understanding of an optimal bucket/fingerprint/size ratio from the aforementioned paper.*/ -package cuckoo diff --git a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go b/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go deleted file mode 100644 index 693184c9d4..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/scalable_cuckoofilter.go +++ /dev/null @@ -1,170 +0,0 @@ -package cuckoo - -import ( - "bytes" - "encoding/gob" -) - -const ( - DefaultLoadFactor = 0.9 - DefaultCapacity = 10000 -) - -type ScalableCuckooFilter struct { - filters []*Filter - loadFactor float32 - //when scale(last filter size * loadFactor >= capacity) get new filter capacity - scaleFactor func(capacity uint) uint -} - -type option func(*ScalableCuckooFilter) - -type Store struct { - Bytes [][]byte - LoadFactor float32 -} - -/* - by default option the grow capacity is: - capacity , total - 4096 4096 - 8192 12288 -16384 28672 -32768 61440 -65536 126,976 -*/ -func NewScalableCuckooFilter(opts ...option) *ScalableCuckooFilter { - sfilter := new(ScalableCuckooFilter) - for _, opt := range opts { - opt(sfilter) - } - configure(sfilter) - return sfilter -} - -func (sf *ScalableCuckooFilter) Lookup(data []byte) bool { - for _, filter := range sf.filters { - if filter.Lookup(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Reset() { - for _, filter := range sf.filters { - filter.Reset() - } -} - -func (sf *ScalableCuckooFilter) Insert(data []byte) bool { - needScale := false - lastFilter := sf.filters[len(sf.filters)-1] - if (float32(lastFilter.count) / float32(len(lastFilter.buckets))) > sf.loadFactor { - needScale = true - } else { - b := lastFilter.Insert(data) - needScale = !b - } - if !needScale { - return true - } - newFilter := NewFilter(sf.scaleFactor(uint(len(lastFilter.buckets)))) - sf.filters = append(sf.filters, newFilter) - return 
newFilter.Insert(data) -} - -func (sf *ScalableCuckooFilter) InsertUnique(data []byte) bool { - if sf.Lookup(data) { - return false - } - return sf.Insert(data) -} - -func (sf *ScalableCuckooFilter) Delete(data []byte) bool { - for _, filter := range sf.filters { - if filter.Delete(data) { - return true - } - } - return false -} - -func (sf *ScalableCuckooFilter) Count() uint { - var sum uint - for _, filter := range sf.filters { - sum += filter.count - } - return sum - -} - -func (sf *ScalableCuckooFilter) Encode() []byte { - slice := make([][]byte, len(sf.filters)) - for i, filter := range sf.filters { - encode := filter.Encode() - slice[i] = encode - } - store := &Store{ - Bytes: slice, - LoadFactor: sf.loadFactor, - } - buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(store) - if err != nil { - return nil - } - return buf.Bytes() -} - -func (sf *ScalableCuckooFilter) DecodeWithParam(fBytes []byte, opts ...option) (*ScalableCuckooFilter, error) { - instance, err := DecodeScalableFilter(fBytes) - if err != nil { - return nil, err - } - for _, opt := range opts { - opt(instance) - } - return instance, nil -} - -func DecodeScalableFilter(fBytes []byte) (*ScalableCuckooFilter, error) { - buf := bytes.NewBuffer(fBytes) - dec := gob.NewDecoder(buf) - store := &Store{} - err := dec.Decode(store) - if err != nil { - return nil, err - } - filterSize := len(store.Bytes) - instance := NewScalableCuckooFilter(func(filter *ScalableCuckooFilter) { - filter.filters = make([]*Filter, filterSize) - }, func(filter *ScalableCuckooFilter) { - filter.loadFactor = store.LoadFactor - }) - for i, oneBytes := range store.Bytes { - filter, err := Decode(oneBytes) - if err != nil { - return nil, err - } - instance.filters[i] = filter - } - return instance, nil - -} - -func configure(sfilter *ScalableCuckooFilter) { - if sfilter.loadFactor == 0 { - sfilter.loadFactor = DefaultLoadFactor - } - if sfilter.scaleFactor == nil { - sfilter.scaleFactor = func(currentSize uint) uint { - return currentSize * bucketSize * 2 - } - } - if sfilter.filters == nil { - initFilter := NewFilter(DefaultCapacity) - sfilter.filters = []*Filter{initFilter} - } -} diff --git a/vendor/github.com/seiflotfy/cuckoofilter/util.go b/vendor/github.com/seiflotfy/cuckoofilter/util.go deleted file mode 100644 index 2a0f65b130..0000000000 --- a/vendor/github.com/seiflotfy/cuckoofilter/util.go +++ /dev/null @@ -1,52 +0,0 @@ -package cuckoo - -import ( - metro "github.com/dgryski/go-metro" -) - -var ( - altHash = [256]uint{} - masks = [65]uint{} -) - -func init() { - for i := 0; i < 256; i++ { - altHash[i] = (uint(metro.Hash64([]byte{byte(i)}, 1337))) - } - for i := uint(0); i <= 64; i++ { - masks[i] = (1 << i) - 1 - } -} - -func getAltIndex(fp fingerprint, i uint, bucketPow uint) uint { - mask := masks[bucketPow] - hash := altHash[fp] & mask - return (i & mask) ^ hash -} - -func getFingerprint(hash uint64) byte { - // Use least significant bits for fingerprint. - fp := byte(hash%255 + 1) - return fp -} - -// getIndicesAndFingerprint returns the 2 bucket indices and fingerprint to be used -func getIndexAndFingerprint(data []byte, bucketPow uint) (uint, fingerprint) { - hash := metro.Hash64(data, 1337) - fp := getFingerprint(hash) - // Use most significant bits for deriving index. 
- i1 := uint(hash>>32) & masks[bucketPow] - return i1, fingerprint(fp) -} - -func getNextPow2(n uint64) uint { - n-- - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n++ - return uint(n) -} diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index 23e92b8c16..0dc65033d0 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -14,6 +14,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#33](https://github.com/thanos-io/objstore/pull/33) Tracing: Add `ContextWithTracer()` to inject the tracer into the context. - [#34](https://github.com/thanos-io/objstore/pull/34) Fix ignored options when creating shared credential Azure client. - [#62](https://github.com/thanos-io/objstore/pull/62) S3: Fix ignored context cancellation in `Iter` method. +- [#77](https://github.com/thanos-io/objstore/pull/77) Fix buckets wrapped with metrics from being unable to determine object sizes in `Upload`. +- [#78](https://github.com/thanos-io/objstore/pull/78) S3: Fix possible concurrent modification of the PutUserMetadata map. ### Added - [#15](https://github.com/thanos-io/objstore/pull/15) Add Oracle Cloud Infrastructure Object Storage Bucket support. @@ -27,7 +29,11 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#61](https://github.com/thanos-io/objstore/pull/61) Add OpenTelemetry TracingBucket. > This also changes the behaviour of `client.NewBucket`. Now it returns, uninstrumented and untraced bucket. You can combine `objstore.WrapWithMetrics` and `tracing/{opentelemetry,opentracing}.WrapWithTraces` to have old behavior. +- [#69](https://github.com/thanos-io/objstore/pull/69) [#66](https://github.com/thanos-io/objstore/pull/66) Add `objstore_bucket_operation_transferred_bytes` that counts the number of total bytes read from the bucket operation Get/GetRange and also counts the number of total bytes written to the bucket operation Upload. - [#64](https://github.com/thanos-io/objstore/pull/64) OCI: OKE Workload Identity support. +- [#73](https://github.com/thanos-io/objstore/pull/73) Аdded file path to erros from DownloadFile +- [#51](https://github.com/thanos-io/objstore/pull/51) Azure: Support using connection string authentication. +- [#76](https://github.com/thanos-io/objstore/pull/76) GCS: Query for object names only in `Iter` to possibly improve performance when listing objects. ### Changed - [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`. diff --git a/vendor/github.com/thanos-io/objstore/README.md b/vendor/github.com/thanos-io/objstore/README.md index c9dbe34d55..2a1552714c 100644 --- a/vendor/github.com/thanos-io/objstore/README.md +++ b/vendor/github.com/thanos-io/objstore/README.md @@ -419,6 +419,7 @@ type: AZURE config: storage_account: "" storage_account_key: "" + storage_connection_string: "" container: "" endpoint: "" user_assigned_id: "" @@ -454,6 +455,8 @@ If `msi_resource` is used, authentication is done via system-assigned managed id If `user_assigned_id` is used, authentication is done via user-assigned managed identity. When using `user_assigned_id` the `msi_resource` defaults to `https://.` +If `storage_connection_string` is set, the values of `storage_account` and `endpoint` values will not be used. Use this method over `storage_account_key` if you need to authenticate via a SAS token. 
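[Editorial note: the README passage above documents the new `storage_connection_string` option; the `helpers.go` hunk later in this patch implements it via `container.NewClientFromConnectionString`. A minimal sketch of that call, assuming the Azure SDK container client; the connection string and container name below are placeholders, not values from this patch.]

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func main() {
	// Placeholder connection string; in objstore this value comes from the
	// storage_connection_string config field (often carrying a SAS token).
	connStr := "BlobEndpoint=https://myaccount.blob.core.windows.net;SharedAccessSignature=sv=..."

	// Account name, key, and endpoint are all encoded in the connection
	// string, which is why the validate() hunk in this patch rejects
	// combining it with storage_account_key or user_assigned_id.
	client, err := container.NewClientFromConnectionString(connStr, "my-container", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(client.URL())
}
```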
+ The generic `max_retries` will be used as value for the `pipeline_config`'s `max_tries` and `reader_config`'s `max_retry_requests`. For more control, `max_retries` could be ignored (0) and one could set specific retry values. ##### OpenStack Swift diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index b9b56bf4fa..20723704df 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -315,7 +315,7 @@ func DownloadFile(ctx context.Context, logger log.Logger, bkt BucketReader, src, f, err := os.Create(dst) if err != nil { - return errors.Wrap(err, "create file") + return errors.Wrapf(err, "create file %s", dst) } defer func() { if err != nil { @@ -327,7 +327,7 @@ func DownloadFile(ctx context.Context, logger log.Logger, bkt BucketReader, src, defer logerrcapture.Do(logger, f.Close, "close block's output file") if _, err = io.Copy(f, rc); err != nil { - return errors.Wrap(err, "copy object to file") + return errors.Wrapf(err, "copy object to file %s", src) } return nil } @@ -458,11 +458,12 @@ func WrapWithMetrics(b Bucket, reg prometheus.Registerer, name string) *metricBu bkt.opsDuration.WithLabelValues(op) bkt.opsFetchedBytes.WithLabelValues(op) } - // fetched bytes only relevant for get and getrange + + // fetched bytes only relevant for get, getrange and upload for _, op := range []string{ OpGet, OpGetRange, - // TODO: Add uploads + OpUpload, } { bkt.opsTransferredBytes.WithLabelValues(op) } @@ -592,15 +593,25 @@ func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) err const op = OpUpload b.ops.WithLabelValues(op).Inc() - start := time.Now() - if err := b.bkt.Upload(ctx, name, r); err != nil { + trc := newTimingReadCloser( + NopCloserWithSize(r), + op, + b.opsDuration, + b.opsFailures, + b.isOpFailureExpected, + nil, + b.opsTransferredBytes, + ) + defer trc.Close() + err := b.bkt.Upload(ctx, name, trc) + if err != nil { if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { b.opsFailures.WithLabelValues(op).Inc() } return err } b.lastSuccessfulUploadTime.SetToCurrentTime() - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) + return nil } @@ -692,7 +703,10 @@ func (rc *timingReadCloser) Close() error { func (rc *timingReadCloser) Read(b []byte) (n int, err error) { n, err = rc.ReadCloser.Read(b) - rc.fetchedBytes.WithLabelValues(rc.op).Add(float64(n)) + if rc.fetchedBytes != nil { + rc.fetchedBytes.WithLabelValues(rc.op).Add(float64(n)) + } + rc.readBytes += int64(n) // Report metric just once. if !rc.alreadyGotErr && err != nil && err != io.EOF { diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go index 376fb6290f..a72f30528e 100644 --- a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go +++ b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go @@ -44,15 +44,16 @@ var DefaultConfig = Config{ // Config Azure storage configuration. 
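[Editorial note: the `metricBucket.Upload` hunk above replaces the explicit start/`ObserveDuration` pair with a timing read-closer wrapped around the upload reader, so both duration and transferred bytes are measured as the provider client consumes the stream. A self-contained sketch of that pattern under simplified assumptions; `countingReadCloser` and its fields are illustrative names, not the objstore implementation.]

```go
package main

import (
	"io"
	"strings"
	"time"
)

// countingReadCloser is an illustrative stand-in for objstore's
// timingReadCloser: it counts bytes as the provider client consumes the
// reader and reports duration and size once, when the wrapper is closed.
type countingReadCloser struct {
	r       io.Reader
	start   time.Time
	read    int64
	observe func(seconds float64, bytes int64)
}

func (c *countingReadCloser) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.read += int64(n)
	return n, err
}

func (c *countingReadCloser) Close() error {
	c.observe(time.Since(c.start).Seconds(), c.read)
	return nil
}

func main() {
	trc := &countingReadCloser{
		r:     strings.NewReader("payload"),
		start: time.Now(),
		observe: func(seconds float64, bytes int64) {
			// In objstore these feed opsDuration and opsTransferredBytes.
			_, _ = seconds, bytes
		},
	}
	defer trc.Close()
	_, _ = io.Copy(io.Discard, trc) // the upload path reads through the wrapper
}
```

Wrapping the reader rather than timing the call site is also what lets the metrics layer keep reporting upload sizes, per the CHANGELOG entries for #69/#66 and #77 above.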
type Config struct { - StorageAccountName string `yaml:"storage_account"` - StorageAccountKey string `yaml:"storage_account_key"` - ContainerName string `yaml:"container"` - Endpoint string `yaml:"endpoint"` - UserAssignedID string `yaml:"user_assigned_id"` - MaxRetries int `yaml:"max_retries"` - ReaderConfig ReaderConfig `yaml:"reader_config"` - PipelineConfig PipelineConfig `yaml:"pipeline_config"` - HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` + StorageAccountName string `yaml:"storage_account"` + StorageAccountKey string `yaml:"storage_account_key"` + StorageConnectionString string `yaml:"storage_connection_string"` + ContainerName string `yaml:"container"` + Endpoint string `yaml:"endpoint"` + UserAssignedID string `yaml:"user_assigned_id"` + MaxRetries int `yaml:"max_retries"` + ReaderConfig ReaderConfig `yaml:"reader_config"` + PipelineConfig PipelineConfig `yaml:"pipeline_config"` + HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` // Deprecated: Is automatically set by the Azure SDK. MSIResource string `yaml:"msi_resource"` @@ -76,6 +77,14 @@ func (conf *Config) validate() error { errMsg = append(errMsg, "user_assigned_id cannot be set when using storage_account_key authentication") } + if conf.UserAssignedID != "" && conf.StorageConnectionString != "" { + errMsg = append(errMsg, "user_assigned_id cannot be set when using storage_connection_string authentication") + } + + if conf.StorageAccountKey != "" && conf.StorageConnectionString != "" { + errMsg = append(errMsg, "storage_account_key and storage_connection_string cannot both be set") + } + if conf.StorageAccountName == "" { errMsg = append(errMsg, "storage_account_name is required but not configured") } diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go index b76154d6c2..7b4a5fbe51 100644 --- a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go +++ b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go @@ -38,6 +38,16 @@ func getContainerClient(conf Config) (*container.Client, error) { Transport: &http.Client{Transport: dt}, }, } + + // Use connection string if set + if conf.StorageConnectionString != "" { + containerClient, err := container.NewClientFromConnectionString(conf.StorageConnectionString, conf.ContainerName, opt) + if err != nil { + return nil, err + } + return containerClient, nil + } + containerURL := fmt.Sprintf("https://%s.%s/%s", conf.StorageAccountName, conf.Endpoint, conf.ContainerName) // Use shared keys if set diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index 5ea45c7e97..ad305d6e15 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -108,10 +108,16 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt delimiter = "" } - it := b.bkt.Objects(ctx, &storage.Query{ + query := &storage.Query{ Prefix: dir, Delimiter: delimiter, - }) + } + err := query.SetAttrSelection([]string{"Name"}) + if err != nil { + return err + } + + it := b.bkt.Objects(ctx, query) for { select { case <-ctx.Done(): diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index f92d397398..83e3a2de76 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -492,6 +492,13 @@ 
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { if size < int64(partSize) { partSize = 0 } + + // Cloning map since minio may modify it + userMetadata := make(map[string]string, len(b.putUserMetadata)) + for k, v := range b.putUserMetadata { + userMetadata[k] = v + } + if _, err := b.client.PutObject( ctx, b.name, @@ -501,7 +508,7 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { minio.PutObjectOptions{ PartSize: partSize, ServerSideEncryption: sse, - UserMetadata: b.putUserMetadata, + UserMetadata: userMetadata, StorageClass: b.storageClass, // 4 is what minio-go have as the default. To be certain we do micro benchmark before any changes we // ensure we pin this number to four. diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go index b2b977f908..524d2fdba2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/async_op.go @@ -5,8 +5,6 @@ package cacheutil import ( "sync" - - "github.com/pkg/errors" ) type asyncOperationProcessor struct { @@ -54,13 +52,11 @@ func (p *asyncOperationProcessor) asyncQueueProcessLoop() { } } -var errAsyncBufferFull = errors.New("the async buffer is full") - func (p *asyncOperationProcessor) enqueueAsync(op func()) error { select { case p.asyncQueue <- op: return nil default: - return errAsyncBufferFull + return errMemcachedAsyncBufferFull } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index f34f97abd7..55d07d9d5a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -400,7 +400,7 @@ func (c *memcachedClient) SetAsync(key string, value []byte, ttl time.Duration) c.duration.WithLabelValues(opSet).Observe(time.Since(start).Seconds()) }) - if err == errMemcachedAsyncBufferFull { + if errors.Is(err, errMemcachedAsyncBufferFull) { c.skipped.WithLabelValues(opSet, reasonAsyncBufferFull).Inc() level.Debug(c.logger).Log("msg", "failed to store item to memcached because the async buffer is full", "err", err, "size", len(c.p.asyncQueue)) return nil diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 78b615f904..51d895058a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -372,8 +372,17 @@ func downsampleRawLoop(data []sample, resolution int64, numChunks int) []chunks. 
for ; j < len(data) && data[j].t <= curW; j++ { } - batch := data[:j] + batch := make([]sample, 0, j) + for _, s := range data[:j] { + if math.IsNaN(s.v) { + continue + } + batch = append(batch, s) + } data = data[j:] + if len(batch) == 0 { + continue + } ab := newAggrChunkBuilder() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 4a8eae4572..5a6f31c42d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -59,7 +59,6 @@ import ( "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/stringset" "github.com/thanos-io/thanos/pkg/strutil" "github.com/thanos-io/thanos/pkg/tenancy" "github.com/thanos-io/thanos/pkg/tracing" @@ -148,9 +147,15 @@ type bucketStoreMetrics struct { cachedPostingsOriginalSizeBytes prometheus.Counter cachedPostingsCompressedSizeBytes prometheus.Counter - seriesFetchDuration prometheus.Histogram - postingsFetchDuration prometheus.Histogram - chunkFetchDuration prometheus.Histogram + seriesFetchDuration prometheus.Histogram + // Counts time for fetching series across all batches. + seriesFetchDurationSum prometheus.Histogram + postingsFetchDuration prometheus.Histogram + // chunkFetchDuration counts total time loading chunks, but since we spawn + // multiple goroutines the actual latency is usually much lower than it. + chunkFetchDuration prometheus.Histogram + // Actual absolute total time for loading chunks. + chunkFetchDurationSum prometheus.Histogram } func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { @@ -289,6 +294,12 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.seriesFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_series_fetch_duration_sum_seconds", + Help: "The total time it takes to fetch series to respond to a request sent to a store gateway across all series batches. It includes both the time to fetch it from the cache and from storage in case of cache misses.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) + m.postingsFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "thanos_bucket_store_postings_fetch_duration_seconds", Help: "The time it takes to fetch postings to respond to a request sent to a store gateway. 
It includes both the time to fetch it from the cache and from storage in case of cache misses.", @@ -297,7 +308,13 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { m.chunkFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "thanos_bucket_store_chunks_fetch_duration_seconds", - Help: "The total time spent fetching chunks within a single request a store gateway.", + Help: "The total time spent fetching chunks within a single request for one block.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) + + m.chunkFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_chunks_fetch_duration_sum_seconds", + Help: "The total absolute time spent fetching chunks within a single request for one block.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) @@ -387,9 +404,6 @@ type BucketStore struct { enabledLazyExpandedPostings bool - bmtx sync.Mutex - labelNamesSet stringset.Set - blockEstimatedMaxSeriesFunc BlockEstimator blockEstimatedMaxChunkFunc BlockEstimator } @@ -543,7 +557,6 @@ func NewBucketStore( enableSeriesResponseHints: enableSeriesResponseHints, enableChunkHashCalculation: enableChunkHashCalculation, seriesBatchSize: SeriesBatchSize, - labelNamesSet: stringset.AllStrings(), } for _, option := range options { @@ -931,11 +944,13 @@ type blockSeriesClient struct { lazyExpandedPostingSizeBytes prometheus.Counter lazyExpandedPostingSeriesOverfetchedSizeBytes prometheus.Counter - skipChunks bool - shardMatcher *storepb.ShardMatcher - blockMatchers []*labels.Matcher - calculateChunkHash bool - chunkFetchDuration prometheus.Histogram + skipChunks bool + shardMatcher *storepb.ShardMatcher + blockMatchers []*labels.Matcher + calculateChunkHash bool + seriesFetchDurationSum prometheus.Histogram + chunkFetchDuration prometheus.Histogram + chunkFetchDurationSum prometheus.Histogram // Internal state. 
i uint64 @@ -960,7 +975,9 @@ func newBlockSeriesClient( shardMatcher *storepb.ShardMatcher, calculateChunkHash bool, batchSize int, + seriesFetchDurationSum prometheus.Histogram, chunkFetchDuration prometheus.Histogram, + chunkFetchDurationSum prometheus.Histogram, extLsetToRemove map[string]struct{}, lazyExpandedPostingEnabled bool, lazyExpandedPostingsCount prometheus.Counter, @@ -983,14 +1000,16 @@ func newBlockSeriesClient( extLset: extLset, extLsetToRemove: extLsetToRemove, - mint: req.MinTime, - maxt: req.MaxTime, - indexr: b.indexReader(), - chunkr: chunkr, - chunksLimiter: limiter, - bytesLimiter: bytesLimiter, - skipChunks: req.SkipChunks, - chunkFetchDuration: chunkFetchDuration, + mint: req.MinTime, + maxt: req.MaxTime, + indexr: b.indexReader(), + chunkr: chunkr, + chunksLimiter: limiter, + bytesLimiter: bytesLimiter, + skipChunks: req.SkipChunks, + seriesFetchDurationSum: seriesFetchDurationSum, + chunkFetchDuration: chunkFetchDuration, + chunkFetchDurationSum: chunkFetchDurationSum, lazyExpandedPostingEnabled: lazyExpandedPostingEnabled, lazyExpandedPostingsCount: lazyExpandedPostingsCount, @@ -1079,8 +1098,10 @@ func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { } if len(b.entries) == 0 { + b.seriesFetchDurationSum.Observe(b.indexr.stats.SeriesDownloadLatencySum.Seconds()) if b.chunkr != nil { b.chunkFetchDuration.Observe(b.chunkr.stats.ChunksFetchDurationSum.Seconds()) + b.chunkFetchDurationSum.Observe(b.chunkr.stats.ChunksDownloadLatencySum.Seconds()) } return nil, io.EOF } @@ -1334,7 +1355,8 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolutionMill // Series implements the storepb.StoreServer interface. func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store_SeriesServer) (err error) { - srv := newFlushableServer(seriesSrv, s.LabelNamesSet(), req.WithoutReplicaLabels) + srv := newFlushableServer(seriesSrv, sortingStrategyNone) + if s.queryGate != nil { tracing.DoInSpan(srv.Context(), "store_query_gate_ismyturn", func(ctx context.Context) { err = s.queryGate.Start(srv.Context()) @@ -1430,7 +1452,9 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store shardMatcher, s.enableChunkHashCalculation, s.seriesBatchSize, + s.metrics.seriesFetchDurationSum, s.metrics.chunkFetchDuration, + s.metrics.chunkFetchDurationSum, extLsetToRemove, s.enabledLazyExpandedPostings, s.metrics.lazyExpandedPostingsCount, @@ -1464,44 +1488,19 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store return errors.Wrapf(err, "fetch postings for block %s", blk.meta.ULID) } - // If we have inner replica labels we need to resort. 
- s.mtx.Lock() - needsEagerRetrival := len(req.WithoutReplicaLabels) > 0 && s.labelNamesSet.HasAny(req.WithoutReplicaLabels) - s.mtx.Unlock() - - var resp respSet - if needsEagerRetrival { - labelsToRemove := make(map[string]struct{}) - for _, replicaLabel := range req.WithoutReplicaLabels { - labelsToRemove[replicaLabel] = struct{}{} - } - resp = newEagerRespSet( - srv.Context(), - span, - 10*time.Minute, - blk.meta.ULID.String(), - []labels.Labels{blk.extLset}, - onClose, - blockClient, - shardMatcher, - false, - s.metrics.emptyPostingCount, - labelsToRemove, - ) - } else { - resp = newLazyRespSet( - srv.Context(), - span, - 10*time.Minute, - blk.meta.ULID.String(), - []labels.Labels{blk.extLset}, - onClose, - blockClient, - shardMatcher, - false, - s.metrics.emptyPostingCount, - ) - } + resp := newEagerRespSet( + srv.Context(), + span, + 10*time.Minute, + blk.meta.ULID.String(), + []labels.Labels{blk.extLset}, + onClose, + blockClient, + shardMatcher, + false, + s.metrics.emptyPostingCount, + nil, + ) mtx.Lock() respSets = append(respSets, resp) @@ -1736,7 +1735,9 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq nil, true, SeriesBatchSize, - s.metrics.chunkFetchDuration, + s.metrics.seriesFetchDurationSum, + nil, + nil, nil, s.enabledLazyExpandedPostings, s.metrics.lazyExpandedPostingsCount, @@ -1814,38 +1815,6 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq }, nil } -func (s *BucketStore) UpdateLabelNames() { - s.mtx.RLock() - defer s.mtx.RUnlock() - - newSet := stringset.New() - for _, b := range s.blocks { - labelNames, err := b.indexHeaderReader.LabelNames() - if err != nil { - level.Warn(s.logger).Log("msg", "error getting label names", "block", b.meta.ULID, "err", err.Error()) - s.updateLabelNamesSet(stringset.AllStrings()) - return - } - for _, l := range labelNames { - newSet.Insert(l) - } - } - s.updateLabelNamesSet(newSet) -} - -func (s *BucketStore) updateLabelNamesSet(newSet stringset.Set) { - s.bmtx.Lock() - s.labelNamesSet = newSet - s.bmtx.Unlock() -} - -func (b *BucketStore) LabelNamesSet() stringset.Set { - b.bmtx.Lock() - defer b.bmtx.Unlock() - - return b.labelNamesSet -} - func (b *bucketBlock) FilterExtLabelsMatchers(matchers []*labels.Matcher) ([]*labels.Matcher, bool) { // We filter external labels from matchers so we won't try to match series on them. var result []*labels.Matcher @@ -1969,7 +1938,9 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR nil, true, SeriesBatchSize, - s.metrics.chunkFetchDuration, + s.metrics.seriesFetchDurationSum, + nil, + nil, nil, s.enabledLazyExpandedPostings, s.metrics.lazyExpandedPostingsCount, @@ -3073,7 +3044,10 @@ func (it *bigEndianPostings) length() int { func (r *bucketIndexReader) PreloadSeries(ctx context.Context, ids []storage.SeriesRef, bytesLimiter BytesLimiter) error { timer := prometheus.NewTimer(r.block.metrics.seriesFetchDuration) - defer timer.ObserveDuration() + defer func() { + d := timer.ObserveDuration() + r.stats.SeriesDownloadLatencySum += d + }() // Load series from cache, overwriting the list of ids to preload // with the missing ones. 
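[Editorial note: the `PreloadSeries` hunk above turns a plain `defer timer.ObserveDuration()` into a deferred closure so the same measurement also accumulates into `queryStats.SeriesDownloadLatencySum`. A minimal sketch of that double-accounting pattern; metric and type names here are illustrative.]

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

type requestStats struct {
	// Mirrors queryStats.SeriesDownloadLatencySum: absolute time across
	// all fetches in one request, observed once at the end of the stream.
	SeriesDownloadLatencySum time.Duration
}

func fetchSeries(perCall prometheus.Histogram, st *requestStats) {
	timer := prometheus.NewTimer(perCall)
	defer func() {
		// ObserveDuration records into the per-call histogram and also
		// returns the elapsed time, so it can feed the request-level sum.
		st.SeriesDownloadLatencySum += timer.ObserveDuration()
	}()
	time.Sleep(5 * time.Millisecond) // stand-in for the actual download
}

func main() {
	perCall := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "series_fetch_duration_seconds",
		Buckets: prometheus.DefBuckets,
	})
	perRequest := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "series_fetch_duration_sum_seconds",
		Buckets: prometheus.DefBuckets,
	})
	st := &requestStats{}
	fetchSeries(perCall, st)
	fetchSeries(perCall, st)
	// One observation per request, covering all batches, analogous to
	// thanos_bucket_store_series_fetch_duration_sum_seconds above.
	perRequest.Observe(st.SeriesDownloadLatencySum.Seconds())
}
```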
@@ -3391,7 +3365,10 @@ func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs [ r.loadingChunks = true r.loadingChunksMtx.Unlock() + begin := time.Now() defer func() { + r.stats.ChunksDownloadLatencySum += time.Since(begin) + r.loadingChunksMtx.Lock() r.loadingChunks = false r.loadingChunksMtx.Unlock() @@ -3620,19 +3597,21 @@ type queryStats struct { cachedPostingsDecompressionErrors int CachedPostingsDecompressionTimeSum time.Duration - seriesTouched int - SeriesTouchedSizeSum units.Base2Bytes - seriesFetched int - SeriesFetchedSizeSum units.Base2Bytes - seriesFetchCount int - SeriesFetchDurationSum time.Duration - - chunksTouched int - ChunksTouchedSizeSum units.Base2Bytes - chunksFetched int - ChunksFetchedSizeSum units.Base2Bytes - chunksFetchCount int - ChunksFetchDurationSum time.Duration + seriesTouched int + SeriesTouchedSizeSum units.Base2Bytes + seriesFetched int + SeriesFetchedSizeSum units.Base2Bytes + seriesFetchCount int + SeriesFetchDurationSum time.Duration + SeriesDownloadLatencySum time.Duration + + chunksTouched int + ChunksTouchedSizeSum units.Base2Bytes + chunksFetched int + ChunksFetchedSizeSum units.Base2Bytes + chunksFetchCount int + ChunksFetchDurationSum time.Duration + ChunksDownloadLatencySum time.Duration GetAllDuration time.Duration mergedSeriesCount int @@ -3668,6 +3647,7 @@ func (s queryStats) merge(o *queryStats) *queryStats { s.SeriesFetchedSizeSum += o.SeriesFetchedSizeSum s.seriesFetchCount += o.seriesFetchCount s.SeriesFetchDurationSum += o.SeriesFetchDurationSum + s.SeriesDownloadLatencySum += o.SeriesDownloadLatencySum s.chunksTouched += o.chunksTouched s.ChunksTouchedSizeSum += o.ChunksTouchedSizeSum @@ -3675,6 +3655,7 @@ func (s queryStats) merge(o *queryStats) *queryStats { s.ChunksFetchedSizeSum += o.ChunksFetchedSizeSum s.chunksFetchCount += o.chunksFetchCount s.ChunksFetchDurationSum += o.ChunksFetchDurationSum + s.ChunksDownloadLatencySum += o.ChunksDownloadLatencySum s.GetAllDuration += o.GetAllDuration s.mergedSeriesCount += o.mergedSeriesCount @@ -3708,6 +3689,8 @@ func (s queryStats) toHints() *hintspb.QueryStats { MergedSeriesCount: int64(s.mergedSeriesCount), MergedChunksCount: int64(s.mergedChunksCount), DataDownloadedSizeSum: int64(s.DataDownloadedSizeSum), + GetAllDuration: s.GetAllDuration, + MergeDuration: s.MergeDuration, } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/flushable.go b/vendor/github.com/thanos-io/thanos/pkg/store/flushable.go index c41b67d152..e6cadfbea9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/flushable.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/flushable.go @@ -9,24 +9,35 @@ import ( "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/stringset" +) + +type sortingStrategy uint64 + +const ( + sortingStrategyStore sortingStrategy = iota + 1 + sortingStrategyNone ) // flushableServer is an extension of storepb.Store_SeriesServer with a Flush method. type flushableServer interface { storepb.Store_SeriesServer + Flush() error } func newFlushableServer( upstream storepb.Store_SeriesServer, - labelNames stringset.Set, - replicaLabels []string, + sortingsortingStrategy sortingStrategy, ) flushableServer { - if labelNames.HasAny(replicaLabels) { + switch sortingsortingStrategy { + case sortingStrategyStore: return &resortingServer{Store_SeriesServer: upstream} + case sortingStrategyNone: + return &passthroughServer{Store_SeriesServer: upstream} + default: + // should not happen. 
+ panic("unexpected sorting strategy") } - return &passthroughServer{Store_SeriesServer: upstream} } // passthroughServer is a flushableServer that forwards all data to diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go index 9d7da86e94..bf82d245e2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go @@ -47,4 +47,7 @@ func (m *QueryStats) Merge(other *QueryStats) { m.ChunksFetchedSizeSum += other.ChunksFetchedSizeSum m.ChunksTouched += other.ChunksTouched m.ChunksTouchedSizeSum += other.ChunksTouchedSizeSum + + m.GetAllDuration += other.GetAllDuration + m.MergeDuration += other.MergeDuration } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go index 457793cf6a..2098d7489c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go @@ -5,12 +5,16 @@ package hintspb import ( fmt "fmt" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + io "io" math "math" math_bits "math/bits" + time "time" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" storepb "github.com/thanos-io/thanos/pkg/store/storepb" ) @@ -18,6 +22,7 @@ import ( var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -301,26 +306,28 @@ var xxx_messageInfo_LabelValuesResponseHints proto.InternalMessageInfo // / QueryStats fields are unstable and might change in the future. 
type QueryStats struct { - BlocksQueried int64 `protobuf:"varint,1,opt,name=blocks_queried,json=blocksQueried,proto3" json:"blocks_queried,omitempty"` - MergedSeriesCount int64 `protobuf:"varint,2,opt,name=merged_series_count,json=mergedSeriesCount,proto3" json:"merged_series_count,omitempty"` - MergedChunksCount int64 `protobuf:"varint,3,opt,name=merged_chunks_count,json=mergedChunksCount,proto3" json:"merged_chunks_count,omitempty"` - PostingsTouched int64 `protobuf:"varint,4,opt,name=postings_touched,json=postingsTouched,proto3" json:"postings_touched,omitempty"` - PostingsTouchedSizeSum int64 `protobuf:"varint,5,opt,name=postings_touched_size_sum,json=postingsTouchedSizeSum,proto3" json:"postings_touched_size_sum,omitempty"` - PostingsToFetch int64 `protobuf:"varint,6,opt,name=postings_to_fetch,json=postingsToFetch,proto3" json:"postings_to_fetch,omitempty"` - PostingsFetched int64 `protobuf:"varint,7,opt,name=postings_fetched,json=postingsFetched,proto3" json:"postings_fetched,omitempty"` - PostingsFetchedSizeSum int64 `protobuf:"varint,8,opt,name=postings_fetched_size_sum,json=postingsFetchedSizeSum,proto3" json:"postings_fetched_size_sum,omitempty"` - PostingsFetchCount int64 `protobuf:"varint,9,opt,name=postings_fetch_count,json=postingsFetchCount,proto3" json:"postings_fetch_count,omitempty"` - SeriesTouched int64 `protobuf:"varint,10,opt,name=series_touched,json=seriesTouched,proto3" json:"series_touched,omitempty"` - SeriesTouchedSizeSum int64 `protobuf:"varint,11,opt,name=series_touched_size_sum,json=seriesTouchedSizeSum,proto3" json:"series_touched_size_sum,omitempty"` - SeriesFetched int64 `protobuf:"varint,12,opt,name=series_fetched,json=seriesFetched,proto3" json:"series_fetched,omitempty"` - SeriesFetchedSizeSum int64 `protobuf:"varint,13,opt,name=series_fetched_size_sum,json=seriesFetchedSizeSum,proto3" json:"series_fetched_size_sum,omitempty"` - SeriesFetchCount int64 `protobuf:"varint,14,opt,name=series_fetch_count,json=seriesFetchCount,proto3" json:"series_fetch_count,omitempty"` - ChunksTouched int64 `protobuf:"varint,15,opt,name=chunks_touched,json=chunksTouched,proto3" json:"chunks_touched,omitempty"` - ChunksTouchedSizeSum int64 `protobuf:"varint,16,opt,name=chunks_touched_size_sum,json=chunksTouchedSizeSum,proto3" json:"chunks_touched_size_sum,omitempty"` - ChunksFetched int64 `protobuf:"varint,17,opt,name=chunks_fetched,json=chunksFetched,proto3" json:"chunks_fetched,omitempty"` - ChunksFetchedSizeSum int64 `protobuf:"varint,18,opt,name=chunks_fetched_size_sum,json=chunksFetchedSizeSum,proto3" json:"chunks_fetched_size_sum,omitempty"` - ChunksFetchCount int64 `protobuf:"varint,19,opt,name=chunks_fetch_count,json=chunksFetchCount,proto3" json:"chunks_fetch_count,omitempty"` - DataDownloadedSizeSum int64 `protobuf:"varint,20,opt,name=data_downloaded_size_sum,json=dataDownloadedSizeSum,proto3" json:"data_downloaded_size_sum,omitempty"` + BlocksQueried int64 `protobuf:"varint,1,opt,name=blocks_queried,json=blocksQueried,proto3" json:"blocks_queried,omitempty"` + MergedSeriesCount int64 `protobuf:"varint,2,opt,name=merged_series_count,json=mergedSeriesCount,proto3" json:"merged_series_count,omitempty"` + MergedChunksCount int64 `protobuf:"varint,3,opt,name=merged_chunks_count,json=mergedChunksCount,proto3" json:"merged_chunks_count,omitempty"` + PostingsTouched int64 `protobuf:"varint,4,opt,name=postings_touched,json=postingsTouched,proto3" json:"postings_touched,omitempty"` + PostingsTouchedSizeSum int64 
`protobuf:"varint,5,opt,name=postings_touched_size_sum,json=postingsTouchedSizeSum,proto3" json:"postings_touched_size_sum,omitempty"` + PostingsToFetch int64 `protobuf:"varint,6,opt,name=postings_to_fetch,json=postingsToFetch,proto3" json:"postings_to_fetch,omitempty"` + PostingsFetched int64 `protobuf:"varint,7,opt,name=postings_fetched,json=postingsFetched,proto3" json:"postings_fetched,omitempty"` + PostingsFetchedSizeSum int64 `protobuf:"varint,8,opt,name=postings_fetched_size_sum,json=postingsFetchedSizeSum,proto3" json:"postings_fetched_size_sum,omitempty"` + PostingsFetchCount int64 `protobuf:"varint,9,opt,name=postings_fetch_count,json=postingsFetchCount,proto3" json:"postings_fetch_count,omitempty"` + SeriesTouched int64 `protobuf:"varint,10,opt,name=series_touched,json=seriesTouched,proto3" json:"series_touched,omitempty"` + SeriesTouchedSizeSum int64 `protobuf:"varint,11,opt,name=series_touched_size_sum,json=seriesTouchedSizeSum,proto3" json:"series_touched_size_sum,omitempty"` + SeriesFetched int64 `protobuf:"varint,12,opt,name=series_fetched,json=seriesFetched,proto3" json:"series_fetched,omitempty"` + SeriesFetchedSizeSum int64 `protobuf:"varint,13,opt,name=series_fetched_size_sum,json=seriesFetchedSizeSum,proto3" json:"series_fetched_size_sum,omitempty"` + SeriesFetchCount int64 `protobuf:"varint,14,opt,name=series_fetch_count,json=seriesFetchCount,proto3" json:"series_fetch_count,omitempty"` + ChunksTouched int64 `protobuf:"varint,15,opt,name=chunks_touched,json=chunksTouched,proto3" json:"chunks_touched,omitempty"` + ChunksTouchedSizeSum int64 `protobuf:"varint,16,opt,name=chunks_touched_size_sum,json=chunksTouchedSizeSum,proto3" json:"chunks_touched_size_sum,omitempty"` + ChunksFetched int64 `protobuf:"varint,17,opt,name=chunks_fetched,json=chunksFetched,proto3" json:"chunks_fetched,omitempty"` + ChunksFetchedSizeSum int64 `protobuf:"varint,18,opt,name=chunks_fetched_size_sum,json=chunksFetchedSizeSum,proto3" json:"chunks_fetched_size_sum,omitempty"` + ChunksFetchCount int64 `protobuf:"varint,19,opt,name=chunks_fetch_count,json=chunksFetchCount,proto3" json:"chunks_fetch_count,omitempty"` + DataDownloadedSizeSum int64 `protobuf:"varint,20,opt,name=data_downloaded_size_sum,json=dataDownloadedSizeSum,proto3" json:"data_downloaded_size_sum,omitempty"` + GetAllDuration time.Duration `protobuf:"bytes,21,opt,name=get_all_duration,json=getAllDuration,proto3,stdduration" json:"get_all_duration"` + MergeDuration time.Duration `protobuf:"bytes,22,opt,name=merge_duration,json=mergeDuration,proto3,stdduration" json:"merge_duration"` } func (m *QueryStats) Reset() { *m = QueryStats{} } @@ -370,48 +377,53 @@ func init() { func init() { proto.RegisterFile("store/hintspb/hints.proto", fileDescriptor_b82aa23c4c11e83f) } var fileDescriptor_b82aa23c4c11e83f = []byte{ - // 652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x4f, 0x13, 0x41, - 0x1c, 0xed, 0x52, 0x3e, 0x7f, 0x95, 0x52, 0xa6, 0x15, 0x16, 0x0e, 0x2b, 0x69, 0x42, 0x82, 0x86, - 0x14, 0x83, 0x31, 0x46, 0x3d, 0x09, 0x86, 0x78, 0x50, 0x13, 0x5a, 0x83, 0x89, 0x9a, 0x4c, 0xf6, - 0x63, 0xec, 0x6e, 0x68, 0x77, 0x96, 0x9d, 0xd9, 0x18, 0xb8, 0x7b, 0x35, 0xfe, 0x59, 0xc4, 0x13, - 0x47, 0x4f, 0x46, 0xe1, 0x1f, 0x31, 0x3b, 0x1f, 0xec, 0x4c, 0xb9, 0xf6, 0x02, 0xcd, 0xfb, 0xbd, - 0xf7, 0xe6, 0xbd, 0xdf, 0x4c, 0xb2, 0xb0, 0xc1, 0x38, 0xcd, 0xc9, 0x5e, 0x9c, 0xa4, 0x9c, 0x65, - 0x81, 0xfc, 0xdf, 0xcb, 0x72, 0xca, 0x29, 0x5a, 0x50, 0xe0, 0x66, 0x67, 0x48, 0x87, 0x54, 0x60, - 
0x7b, 0xe5, 0x2f, 0x39, 0xde, 0x54, 0x4a, 0xf1, 0x37, 0x0b, 0xf6, 0xf8, 0x79, 0x46, 0x94, 0xb2, - 0xfb, 0xdd, 0x01, 0x34, 0x20, 0x79, 0x42, 0x58, 0x9f, 0x9c, 0x15, 0x84, 0xf1, 0x37, 0xa5, 0x13, - 0x7a, 0x05, 0xcd, 0x60, 0x44, 0xc3, 0x53, 0x3c, 0xf6, 0x79, 0x18, 0x93, 0x9c, 0xb9, 0xce, 0x56, - 0x7d, 0xa7, 0xb1, 0xdf, 0xe9, 0xf1, 0xd8, 0x4f, 0x29, 0xeb, 0xbd, 0xf5, 0x03, 0x32, 0x7a, 0x27, - 0x87, 0x07, 0xb3, 0x97, 0x7f, 0x1e, 0xd4, 0xfa, 0xcb, 0x42, 0xa1, 0x30, 0x86, 0x76, 0x01, 0x91, - 0xd4, 0x0f, 0x46, 0x04, 0x9f, 0x15, 0x24, 0x3f, 0xc7, 0x8c, 0xfb, 0x9c, 0xb9, 0x33, 0x5b, 0xce, - 0xce, 0x62, 0xbf, 0x25, 0x27, 0xc7, 0xe5, 0x60, 0x50, 0xe2, 0xdd, 0x1f, 0x0e, 0xb4, 0x75, 0x0e, - 0x96, 0xd1, 0x94, 0x11, 0x19, 0xe4, 0x25, 0x34, 0x4b, 0x79, 0x42, 0x22, 0x2c, 0xec, 0x75, 0x90, - 0x66, 0x4f, 0x55, 0xee, 0x1d, 0x94, 0xb0, 0x8e, 0xa0, 0xb8, 0x02, 0x63, 0xe8, 0x05, 0x34, 0x26, - 0xcf, 0x6e, 0xec, 0xb7, 0x6f, 0x95, 0xd5, 0xf1, 0x42, 0xee, 0xf4, 0xe1, 0xac, 0x0a, 0xb4, 0x0e, - 0x73, 0xc2, 0x05, 0x35, 0x61, 0x26, 0x89, 0x5c, 0x67, 0xcb, 0xd9, 0x59, 0xea, 0xcf, 0x24, 0x51, - 0xf7, 0x33, 0xac, 0x89, 0xf2, 0xef, 0xfd, 0xf1, 0xd4, 0x97, 0xd6, 0x3d, 0x81, 0x75, 0xd3, 0x7c, - 0x5a, 0x9b, 0xe8, 0x7e, 0x51, 0xbe, 0x27, 0xfe, 0xa8, 0x98, 0x7e, 0xea, 0x8f, 0xe0, 0x5a, 0xee, - 0x53, 0x8b, 0xfd, 0x6b, 0x01, 0xa0, 0xba, 0x25, 0xb4, 0xad, 0xa2, 0x32, 0xac, 0x68, 0xe2, 0x5a, - 0xea, 0x2a, 0x0e, 0x3b, 0x96, 0x20, 0xea, 0x41, 0x7b, 0x4c, 0xf2, 0x21, 0x89, 0x30, 0x13, 0x2f, - 0x0a, 0x87, 0xb4, 0x48, 0xb9, 0xb8, 0xfe, 0x7a, 0x7f, 0x55, 0x8e, 0xe4, 0x5b, 0x3b, 0x2c, 0x07, - 0x06, 0x3f, 0x8c, 0x8b, 0xf4, 0x54, 0xf3, 0xeb, 0x26, 0xff, 0x50, 0x4c, 0x24, 0xff, 0x21, 0xb4, - 0x32, 0xca, 0x78, 0x92, 0x0e, 0x19, 0xe6, 0xb4, 0x08, 0x63, 0x12, 0xb9, 0xb3, 0x82, 0xbc, 0xa2, - 0xf1, 0x0f, 0x12, 0x46, 0xcf, 0x61, 0x63, 0x92, 0x8a, 0x59, 0x72, 0x41, 0x30, 0x2b, 0xc6, 0xee, - 0x9c, 0xd0, 0xac, 0x4d, 0x68, 0x06, 0xc9, 0x05, 0x19, 0x14, 0x63, 0xf4, 0x08, 0x56, 0x0d, 0x29, - 0xfe, 0x4a, 0x78, 0x18, 0xbb, 0xf3, 0x93, 0xc7, 0x1c, 0x95, 0xb0, 0x95, 0x48, 0x10, 0x49, 0xe4, - 0x2e, 0xd8, 0xd4, 0x23, 0x09, 0x5b, 0x89, 0x14, 0xb5, 0x4a, 0xb4, 0x68, 0x27, 0x52, 0x1a, 0x9d, - 0xe8, 0x31, 0x74, 0x6c, 0xa9, 0x5a, 0xd4, 0x92, 0x50, 0x21, 0x4b, 0x25, 0x37, 0xb5, 0x0d, 0x4d, - 0x75, 0x05, 0x7a, 0x4f, 0x20, 0x2f, 0x4c, 0xa2, 0x7a, 0x4b, 0x4f, 0x61, 0xdd, 0xa6, 0x55, 0x89, - 0x1a, 0x82, 0xdf, 0xb1, 0xf8, 0x3a, 0x4f, 0xe5, 0xae, 0x3b, 0xdf, 0x33, 0xdd, 0x75, 0xe3, 0xca, - 0xfd, 0x4e, 0xdf, 0x65, 0xd3, 0x7d, 0xa2, 0xed, 0x2e, 0x20, 0x53, 0xa6, 0xba, 0x36, 0x85, 0xa2, - 0x65, 0x28, 0x6e, 0x9b, 0xaa, 0xc7, 0xa3, 0x9b, 0xae, 0xc8, 0x2c, 0x12, 0x35, 0x9a, 0xda, 0xb4, - 0x2a, 0x4b, 0x4b, 0x66, 0xb1, 0xf8, 0x46, 0x53, 0x25, 0xd3, 0x4d, 0x57, 0x4d, 0x77, 0xa3, 0xa9, - 0x4d, 0xab, 0xdc, 0x91, 0xe9, 0x7e, 0xb7, 0xa9, 0x29, 0x53, 0x4d, 0xdb, 0xb2, 0xa9, 0xa1, 0x90, - 0x4d, 0x9f, 0x81, 0x1b, 0xf9, 0xdc, 0xc7, 0x11, 0xfd, 0x96, 0x8e, 0xa8, 0x1f, 0x99, 0xa7, 0x74, - 0x84, 0xe6, 0x7e, 0x39, 0x7f, 0x7d, 0x3b, 0x56, 0xc7, 0x1c, 0x6c, 0x5f, 0xfe, 0xf3, 0x6a, 0x97, - 0xd7, 0x9e, 0x73, 0x75, 0xed, 0x39, 0x7f, 0xaf, 0x3d, 0xe7, 0xe7, 0x8d, 0x57, 0xbb, 0xba, 0xf1, - 0x6a, 0xbf, 0x6f, 0xbc, 0xda, 0x27, 0xfd, 0x09, 0x0b, 0xe6, 0xc5, 0x87, 0xe9, 0xc9, 0xff, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x4e, 0x26, 0x0a, 0x60, 0xef, 0x06, 0x00, 0x00, + // 731 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x4f, 0xdb, 0x30, + 0x1c, 0x6d, 0x28, 0x1f, 0xc5, 0x1d, 0xa1, 0xb8, 0x05, 0x02, 0x87, 0x80, 0x2a, 0x21, 0xb1, 0x09, + 
0xa5, 0x13, 0xd3, 0x34, 0x6d, 0x3b, 0xf1, 0x21, 0x34, 0x4d, 0x63, 0x12, 0xe9, 0xc4, 0xa4, 0x6d, + 0x92, 0x95, 0x34, 0x26, 0x8d, 0x48, 0xe3, 0x12, 0x3b, 0x9a, 0xe0, 0xbe, 0xeb, 0xb4, 0xe3, 0xfe, + 0x24, 0x8e, 0x1c, 0x77, 0xda, 0x07, 0x68, 0xff, 0xc7, 0x14, 0x7f, 0x34, 0x4e, 0xb9, 0xec, 0xd0, + 0x0b, 0xb4, 0xef, 0xf7, 0xde, 0xf3, 0x7b, 0x76, 0x1a, 0x83, 0x35, 0xca, 0x48, 0x8a, 0x3b, 0xfd, + 0x28, 0x61, 0x74, 0xe8, 0x8b, 0xff, 0xce, 0x30, 0x25, 0x8c, 0xc0, 0x39, 0x09, 0xae, 0xb7, 0x42, + 0x12, 0x12, 0x8e, 0x75, 0xf2, 0x4f, 0x62, 0xbc, 0x6e, 0x87, 0x84, 0x84, 0x31, 0xee, 0xf0, 0x6f, + 0x7e, 0x76, 0xd6, 0x09, 0xb2, 0xd4, 0x63, 0x11, 0x49, 0xe4, 0x5c, 0x3a, 0xf3, 0xbf, 0x43, 0xbf, + 0xc3, 0x2e, 0x87, 0x58, 0x3a, 0xb7, 0xbf, 0x18, 0x00, 0x76, 0x71, 0x1a, 0x61, 0xea, 0xe2, 0x8b, + 0x0c, 0x53, 0xf6, 0x2a, 0x5f, 0x09, 0xee, 0x01, 0xd3, 0x8f, 0x49, 0xef, 0x1c, 0x0d, 0x3c, 0xd6, + 0xeb, 0xe3, 0x94, 0x5a, 0xc6, 0x66, 0x75, 0xbb, 0xbe, 0xdb, 0x72, 0x58, 0xdf, 0x4b, 0x08, 0x75, + 0xde, 0x78, 0x3e, 0x8e, 0x8f, 0xc5, 0x70, 0x7f, 0xfa, 0xfa, 0xe7, 0x46, 0xc5, 0x5d, 0xe0, 0x0a, + 0x89, 0x51, 0xb8, 0x03, 0x20, 0x4e, 0x3c, 0x3f, 0xc6, 0xe8, 0x22, 0xc3, 0xe9, 0x25, 0xa2, 0xcc, + 0x63, 0xd4, 0x9a, 0xda, 0x34, 0xb6, 0x6b, 0x6e, 0x43, 0x4c, 0x4e, 0xf2, 0x41, 0x37, 0xc7, 0xdb, + 0x5f, 0x0d, 0xd0, 0x54, 0x39, 0xe8, 0x90, 0x24, 0x14, 0x8b, 0x20, 0x2f, 0x81, 0x99, 0xcb, 0x23, + 0x1c, 0x20, 0x6e, 0xaf, 0x82, 0x98, 0x8e, 0xdc, 0x12, 0x67, 0x3f, 0x87, 0x55, 0x04, 0xc9, 0xe5, + 0x18, 0x85, 0x2f, 0x40, 0x7d, 0x7c, 0xed, 0xfa, 0x6e, 0x73, 0xa4, 0x2c, 0x96, 0xe7, 0x72, 0xc3, + 0x05, 0x17, 0x45, 0xa0, 0x55, 0x30, 0xc3, 0x5d, 0xa0, 0x09, 0xa6, 0xa2, 0xc0, 0x32, 0x36, 0x8d, + 0xed, 0x79, 0x77, 0x2a, 0x0a, 0xda, 0x1f, 0xc1, 0x0a, 0x2f, 0xff, 0xd6, 0x1b, 0x4c, 0x7c, 0xd3, + 0xda, 0xa7, 0x60, 0x55, 0x37, 0x9f, 0xd4, 0x4e, 0xb4, 0x3f, 0x49, 0xdf, 0x53, 0x2f, 0xce, 0x26, + 0x9f, 0xfa, 0x3d, 0xb0, 0x4a, 0xee, 0x13, 0x8b, 0xfd, 0xb7, 0x06, 0x40, 0x71, 0x4a, 0x70, 0x4b, + 0x46, 0xa5, 0x48, 0xd2, 0xf8, 0xb1, 0x54, 0x65, 0x1c, 0x7a, 0x22, 0x40, 0xe8, 0x80, 0xe6, 0x00, + 0xa7, 0x21, 0x0e, 0x10, 0xe5, 0x4f, 0x14, 0xea, 0x91, 0x2c, 0x61, 0xfc, 0xf8, 0xab, 0xee, 0x92, + 0x18, 0x89, 0x67, 0xed, 0x20, 0x1f, 0x68, 0xfc, 0x5e, 0x3f, 0x4b, 0xce, 0x15, 0xbf, 0xaa, 0xf3, + 0x0f, 0xf8, 0x44, 0xf0, 0x1f, 0x82, 0xc6, 0x90, 0x50, 0x16, 0x25, 0x21, 0x45, 0x8c, 0x64, 0xbd, + 0x3e, 0x0e, 0xac, 0x69, 0x4e, 0x5e, 0x54, 0xf8, 0x3b, 0x01, 0xc3, 0xe7, 0x60, 0x6d, 0x9c, 0x8a, + 0x68, 0x74, 0x85, 0x11, 0xcd, 0x06, 0xd6, 0x0c, 0xd7, 0xac, 0x8c, 0x69, 0xba, 0xd1, 0x15, 0xee, + 0x66, 0x03, 0xf8, 0x08, 0x2c, 0x69, 0x52, 0x74, 0x86, 0x59, 0xaf, 0x6f, 0xcd, 0x8e, 0x2f, 0x73, + 0x94, 0xc3, 0xa5, 0x44, 0x9c, 0x88, 0x03, 0x6b, 0xae, 0x4c, 0x3d, 0x12, 0x70, 0x29, 0x91, 0xa4, + 0x16, 0x89, 0x6a, 0xe5, 0x44, 0x52, 0xa3, 0x12, 0x3d, 0x06, 0xad, 0xb2, 0x54, 0x6e, 0xd4, 0x3c, + 0x57, 0xc1, 0x92, 0x4a, 0xec, 0xd4, 0x16, 0x30, 0xe5, 0x11, 0xa8, 0x7d, 0x02, 0xe2, 0xc0, 0x04, + 0xaa, 0x76, 0xe9, 0x29, 0x58, 0x2d, 0xd3, 0x8a, 0x44, 0x75, 0xce, 0x6f, 0x95, 0xf8, 0x2a, 0x4f, + 0xe1, 0xae, 0x3a, 0x3f, 0xd0, 0xdd, 0x55, 0xe3, 0xc2, 0xfd, 0x5e, 0xdf, 0x05, 0xdd, 0x7d, 0xac, + 0xed, 0x0e, 0x80, 0xba, 0x4c, 0x76, 0x35, 0xb9, 0xa2, 0xa1, 0x29, 0x46, 0x4d, 0xe5, 0xc3, 0xa3, + 0x9a, 0x2e, 0x8a, 0x2c, 0x02, 0xd5, 0x9a, 0x96, 0x69, 0x45, 0x96, 0x86, 0xc8, 0x52, 0xe2, 0x6b, + 0x4d, 0xa5, 0x4c, 0x35, 0x5d, 0xd2, 0xdd, 0xb5, 0xa6, 0x65, 0x5a, 0xe1, 0x0e, 0x75, 0xf7, 0xfb, + 0x4d, 0x75, 0x99, 0x6c, 0xda, 0x14, 0x4d, 0x35, 0x85, 0x68, 0xfa, 0x0c, 0x58, 0x81, 0xc7, 0x3c, + 0x14, 0x90, 0xcf, 0x49, 
0x4c, 0xbc, 0x40, 0x5f, 0xa5, 0xc5, 0x35, 0xcb, 0xf9, 0xfc, 0x70, 0x34, + 0x56, 0xcb, 0x1c, 0x83, 0x46, 0x88, 0x19, 0xf2, 0xe2, 0x18, 0xa9, 0xfb, 0xc9, 0x5a, 0xe6, 0xaf, + 0xe4, 0x35, 0x47, 0x5c, 0x60, 0x8e, 0xba, 0xc0, 0x9c, 0x43, 0x49, 0xd8, 0xaf, 0xe5, 0xaf, 0x85, + 0xef, 0xbf, 0x36, 0x0c, 0xd7, 0x0c, 0x31, 0xdb, 0x8b, 0x63, 0x35, 0x81, 0xaf, 0x81, 0xc9, 0x7f, + 0x9a, 0x85, 0xd9, 0xca, 0xff, 0x9b, 0x2d, 0x70, 0xe9, 0x68, 0xb0, 0x75, 0xfd, 0xc7, 0xae, 0x5c, + 0xdf, 0xda, 0xc6, 0xcd, 0xad, 0x6d, 0xfc, 0xbe, 0xb5, 0x8d, 0x6f, 0x77, 0x76, 0xe5, 0xe6, 0xce, + 0xae, 0xfc, 0xb8, 0xb3, 0x2b, 0x1f, 0xd4, 0xed, 0xeb, 0xcf, 0x72, 0xcb, 0x27, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x49, 0xb6, 0x83, 0x90, 0xaa, 0x07, 0x00, 0x00, } func (m *SeriesRequestHints) Marshal() (dAtA []byte, err error) { @@ -708,6 +720,26 @@ func (m *QueryStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MergeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MergeDuration):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintHints(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.GetAllDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.GetAllDuration):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintHints(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa if m.DataDownloadedSizeSum != 0 { i = encodeVarintHints(dAtA, i, uint64(m.DataDownloadedSizeSum)) i-- @@ -1008,6 +1040,10 @@ func (m *QueryStats) Size() (n int) { if m.DataDownloadedSizeSum != 0 { n += 2 + sovHints(uint64(m.DataDownloadedSizeSum)) } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.GetAllDuration) + n += 2 + l + sovHints(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MergeDuration) + n += 2 + l + sovHints(uint64(l)) return n } @@ -2068,6 +2104,72 @@ func (m *QueryStats) Unmarshal(dAtA []byte) error { break } } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetAllDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.GetAllDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MergeDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MergeDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err 
:= skipHints(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto index aeb3ac11d1..69c60d2a7d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto @@ -5,6 +5,7 @@ syntax = "proto3"; package hintspb; import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; import "store/storepb/types.proto"; option go_package = "hintspb"; @@ -90,4 +91,6 @@ message QueryStats { int64 chunks_fetch_count = 19; int64 data_downloaded_size_sum = 20; + google.protobuf.Duration get_all_duration = 21 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + google.protobuf.Duration merge_duration = 22 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; } \ No newline at end of file diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 244ae5592d..fd6d4c0195 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -42,7 +42,6 @@ import ( "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/store/storepb/prompb" - "github.com/thanos-io/thanos/pkg/stringset" "github.com/thanos-io/thanos/pkg/tracing" ) @@ -54,7 +53,6 @@ type PrometheusStore struct { buffers sync.Pool component component.StoreAPI externalLabelsFn func() labels.Labels - labelNamesSet func() stringset.Set promVersion func() string timestamps func() (mint int64, maxt int64) @@ -81,7 +79,6 @@ func NewPrometheusStore( component component.StoreAPI, externalLabelsFn func() labels.Labels, timestamps func() (mint int64, maxt int64), - labelNamesSet func() stringset.Set, promVersion func() string, ) (*PrometheusStore, error) { if logger == nil { @@ -95,7 +92,6 @@ func NewPrometheusStore( externalLabelsFn: externalLabelsFn, promVersion: promVersion, timestamps: timestamps, - labelNamesSet: labelNamesSet, remoteReadAcceptableResponses: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES}, buffers: sync.Pool{New: func() interface{} { b := make([]byte, 0, initialBufSize) @@ -149,7 +145,8 @@ func (p *PrometheusStore) putBuffer(b *[]byte) { // Series returns all series for a requested time range and label matcher. func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_SeriesServer) error { - s := newFlushableServer(seriesSrv, p.labelNamesSet(), r.WithoutReplicaLabels) + s := newFlushableServer(seriesSrv, sortingStrategyStore) + extLset := p.externalLabelsFn() match, matchers, err := matchesExternalLabels(r.Matchers, extLset) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go index 7ea18b134d..51631b388a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go @@ -164,9 +164,7 @@ func (d *dedupResponseHeap) At() *storepb.SeriesResponse { // tournament trees need n-1 auxiliary nodes so there // might not be much of a difference. 
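[Editorial note: the `hints.proto` and generated `hints.pb.go` changes above declare `get_all_duration` and `merge_duration` with `(gogoproto.stdduration) = true` and `(gogoproto.nullable) = false`, so the generated fields are plain `time.Duration` values rather than message pointers. That is what makes the `custom.go` Merge addition a simple `+=`. A sketch modeling only the two new fields:]

```go
package main

import (
	"fmt"
	"time"
)

// QueryStats here models only the two new duration fields; with
// stdduration and nullable=false the generated struct holds time.Duration
// directly rather than *durationpb.Duration, so merging is plain addition.
type QueryStats struct {
	GetAllDuration time.Duration
	MergeDuration  time.Duration
}

func (m *QueryStats) Merge(other *QueryStats) {
	m.GetAllDuration += other.GetAllDuration
	m.MergeDuration += other.MergeDuration
}

func main() {
	a := &QueryStats{GetAllDuration: 120 * time.Millisecond, MergeDuration: 30 * time.Millisecond}
	b := &QueryStats{GetAllDuration: 80 * time.Millisecond, MergeDuration: 20 * time.Millisecond}
	a.Merge(b)
	fmt.Println(a.GetAllDuration, a.MergeDuration) // 200ms 50ms
}
```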
type ProxyResponseHeap struct { - nodes []ProxyResponseHeapNode - iLblsScratch labels.Labels - jLblsScratch labels.Labels + nodes []ProxyResponseHeapNode } func (h *ProxyResponseHeap) Less(i, j int) bool { @@ -174,26 +172,10 @@ func (h *ProxyResponseHeap) Less(i, j int) bool { jResp := h.nodes[j].rs.At() if iResp.GetSeries() != nil && jResp.GetSeries() != nil { - // Response sets are sorted before adding external labels. - // This comparison excludes those labels to keep the same order. - iStoreLbls := h.nodes[i].rs.StoreLabels() - jStoreLbls := h.nodes[j].rs.StoreLabels() - iLbls := labelpb.ZLabelsToPromLabels(iResp.GetSeries().Labels) jLbls := labelpb.ZLabelsToPromLabels(jResp.GetSeries().Labels) - copyLabels(&h.iLblsScratch, iLbls) - copyLabels(&h.jLblsScratch, jLbls) - - var iExtLbls, jExtLbls labels.Labels - h.iLblsScratch, iExtLbls = dropLabels(h.iLblsScratch, iStoreLbls) - h.jLblsScratch, jExtLbls = dropLabels(h.jLblsScratch, jStoreLbls) - - c := labels.Compare(h.iLblsScratch, h.jLblsScratch) - if c != 0 { - return c < 0 - } - return labels.Compare(iExtLbls, jExtLbls) < 0 + return labels.Compare(iLbls, jLbls) < 0 } else if iResp.GetSeries() == nil && jResp.GetSeries() != nil { return true } else if iResp.GetSeries() != nil && jResp.GetSeries() == nil { @@ -774,9 +756,9 @@ func newEagerRespSet( // This should be used only for stores that does not support doing this on server side. // See docs/proposals-accepted/20221129-avoid-global-sort.md for details. - if len(l.removeLabels) > 0 { - sortWithoutLabels(l.bufferedResponses, l.removeLabels) - } + // NOTE. Client is not guaranteed to give a sorted response when extLset is added + // Generally we need to resort here. + sortWithoutLabels(l.bufferedResponses, l.removeLabels) }(ret) @@ -794,34 +776,6 @@ func rmLabels(l labels.Labels, labelsToRemove map[string]struct{}) labels.Labels return l } -// dropLabels removes labels from the given label set and returns the removed labels. -func dropLabels(l labels.Labels, labelsToDrop map[string]struct{}) (labels.Labels, labels.Labels) { - cutoff := len(l) - for i := 0; i < len(l); i++ { - if i == cutoff { - break - } - if _, ok := labelsToDrop[l[i].Name]; !ok { - continue - } - - lbl := l[i] - l = append(append(l[:i], l[i+1:]...), lbl) - cutoff-- - i-- - } - - return l[:cutoff], l[cutoff:] -} - -func copyLabels(dest *labels.Labels, src labels.Labels) { - if len(*dest) < cap(src) { - *dest = make([]labels.Label, len(src)) - } - *dest = (*dest)[:len(src)] - copy(*dest, src) -} - // sortWithoutLabels removes given labels from series and re-sorts the series responses that the same // series with different labels are coming right after each other. Other types of responses are moved to front. 
func sortWithoutLabels(set []*storepb.SeriesResponse, labelsToRemove map[string]struct{}) { @@ -831,7 +785,9 @@ func sortWithoutLabels(set []*storepb.SeriesResponse, labelsToRemove map[string] continue } - ser.Labels = labelpb.ZLabelsFromPromLabels(rmLabels(labelpb.ZLabelsToPromLabels(ser.Labels), labelsToRemove)) + if len(labelsToRemove) > 0 { + ser.Labels = labelpb.ZLabelsFromPromLabels(rmLabels(labelpb.ZLabelsToPromLabels(ser.Labels), labelsToRemove)) + } } // With the re-ordered label sets, re-sorting all series aligns the same series diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index 73604b9236..b5182f3008 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -13,7 +13,6 @@ import ( "sync" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -26,7 +25,6 @@ import ( "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/stringset" ) const RemoteReadFrameLimit = 1048576 @@ -46,9 +44,6 @@ type TSDBStore struct { buffers sync.Pool maxBytesPerFrame int - lmx sync.RWMutex - labelNamesSet stringset.Set - extLset labels.Labels mtx sync.RWMutex } @@ -77,7 +72,6 @@ func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI component: component, extLset: extLset, maxBytesPerFrame: RemoteReadFrameLimit, - labelNamesSet: stringset.AllStrings(), buffers: sync.Pool{New: func() interface{} { b := make([]byte, 0, initialBufSize) return &b @@ -175,7 +169,7 @@ type CloseDelegator interface { // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. 
func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_SeriesServer) error { - srv := newFlushableServer(seriesSrv, s.LabelNamesSet(), r.WithoutReplicaLabels) + srv := newFlushableServer(seriesSrv, sortingStrategyStore) match, matchers, err := matchesExternalLabels(r.Matchers, s.getExtLset()) if err != nil { @@ -376,38 +370,3 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return &storepb.LabelValuesResponse{Values: values}, nil } - -func (s *TSDBStore) UpdateLabelNames(ctx context.Context) { - newSet := stringset.New() - q, err := s.db.ChunkQuerier(ctx, math.MinInt64, math.MaxInt64) - if err != nil { - level.Warn(s.logger).Log("msg", "error creating tsdb querier", "err", err.Error()) - s.setLabelNamesSet(stringset.AllStrings()) - return - } - defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") - - res, _, err := q.LabelNames() - if err != nil { - level.Warn(s.logger).Log("msg", "error getting label names", "err", err.Error()) - s.setLabelNamesSet(stringset.AllStrings()) - return - } - for _, l := range res { - newSet.Insert(l) - } - s.setLabelNamesSet(newSet) -} - -func (s *TSDBStore) setLabelNamesSet(newSet stringset.Set) { - s.lmx.Lock() - s.labelNamesSet = newSet - s.lmx.Unlock() -} - -func (b *TSDBStore) LabelNamesSet() stringset.Set { - b.lmx.RLock() - defer b.lmx.RUnlock() - - return b.labelNamesSet -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/stringset/set.go b/vendor/github.com/thanos-io/thanos/pkg/stringset/set.go deleted file mode 100644 index 080071570f..0000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/stringset/set.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package stringset - -import ( - cuckoo "github.com/seiflotfy/cuckoofilter" -) - -type Set interface { - Has(string) bool - HasAny([]string) bool - // Count returns the number of elements in the set. - // A value of -1 indicates infinite size and can be returned by a - // set representing all possible string values. 
- Count() int -} - -type fixedSet struct { - cuckoo *cuckoo.Filter -} - -func (f fixedSet) HasAny(strings []string) bool { - for _, s := range strings { - if f.Has(s) { - return true - } - } - return false -} - -func NewFromStrings(items ...string) Set { - f := cuckoo.NewFilter(uint(len(items))) - for _, label := range items { - f.InsertUnique([]byte(label)) - } - - return &fixedSet{cuckoo: f} -} - -func (f fixedSet) Has(s string) bool { - return f.cuckoo.Lookup([]byte(s)) -} - -func (f fixedSet) Count() int { - return int(f.cuckoo.Count()) -} - -type mutableSet struct { - cuckoo *cuckoo.ScalableCuckooFilter -} - -type MutableSet interface { - Set - Insert(string) -} - -func New() MutableSet { - return &mutableSet{ - cuckoo: cuckoo.NewScalableCuckooFilter(), - } -} - -func (e mutableSet) Insert(s string) { - e.cuckoo.InsertUnique([]byte(s)) -} - -func (e mutableSet) Has(s string) bool { - return e.cuckoo.Lookup([]byte(s)) -} - -func (e mutableSet) HasAny(strings []string) bool { - for _, s := range strings { - if e.Has(s) { - return true - } - } - return false -} - -func (e mutableSet) Count() int { - return int(e.cuckoo.Count()) -} - -type allStringsSet struct{} - -func (e allStringsSet) HasAny(_ []string) bool { - return true -} - -func AllStrings() *allStringsSet { - return &allStringsSet{} -} - -func (e allStringsSet) Has(_ string) bool { - return true -} - -func (e allStringsSet) Count() int { - return -1 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 83f112c4c8..4756ad5f79 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -38,7 +38,7 @@ var X86 struct { HasAVX512F bool // Advanced vector extension 512 Foundation Instructions HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions @@ -54,6 +54,9 @@ var X86 struct { HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 HasCX16 bool // Compare and exchange 16 Bytes diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index f5aacfc825..2dcde8285d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -37,6 +37,9 @@ func initOptions() { {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", 
Feature: &X86.HasBMI2}, {Name: "cx16", Feature: &X86.HasCX16}, @@ -138,6 +141,10 @@ func archInit() { eax71, _, _, _ := cpuid(7, 1) X86.HasAVX512BF16 = isSet(5, eax71) } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 8f775fafa6..47fa6a7ebd 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -583,6 +583,7 @@ ccflags="$@" $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a730878e49..0ba030197f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2471,6 +2471,29 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask * return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) } +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8bb30e7ce3..f6eda27050 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -549,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3784f402e5..0787a043be 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2821,6 +2821,23 @@ const ( RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index a07321bed9..14ab34a565 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2197,3 +2197,23 @@ func 
getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 26ef52aafc..494493c78c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -5868,3 +5868,18 @@ const ( VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 373d16388a..67bad0926a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -216,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -437,6 +437,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
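Aside from the Windows changes below, the x/sys bump above also introduces SchedGetAttr/SchedSetAttr wrappers for the Linux sched_getattr(2)/sched_setattr(2) syscalls, together with the SchedAttr struct and the SCHED_* constants. A minimal usage sketch follows; it is illustrative only, not part of this patch, builds only on Linux, and the policy choice is arbitrary:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// pid 0 means "the calling process" for sched_getattr(2)/sched_setattr(2).
	attr, err := unix.SchedGetAttr(0, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("current policy=%d nice=%d\n", attr.Policy, attr.Nice)

	// Demote the process to SCHED_BATCH, keeping the remaining attributes.
	// SchedSetAttr fills in attr.Size before issuing the syscall.
	attr.Policy = unix.SCHED_BATCH
	if err := unix.SchedSetAttr(0, attr, 0); err != nil {
		panic(err)
	}
}
```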
@@ -1624,6 +1628,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 566dd3e315..5c385580f6 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -468,6 +469,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -2367,11 +2370,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -4017,6 +4017,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index a4f3202b16..e92ea755dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -284,9 +284,6 @@ github.com/davecgh/go-spew/spew # github.com/dennwc/varint v1.0.0 ## explicit; go 1.12 github.com/dennwc/varint -# github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 -## explicit -github.com/dgryski/go-metro # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous @@ -842,9 +839,6 @@ github.com/sean-/seed # github.com/segmentio/fasthash v1.0.3 ## explicit; go 1.11 github.com/segmentio/fasthash/fnv1a -# github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb -## explicit; go 1.15 -github.com/seiflotfy/cuckoofilter # github.com/sercand/kuberesolver v2.4.0+incompatible => github.com/sercand/kuberesolver/v4 v4.0.0 ## explicit; 
go 1.14 github.com/sercand/kuberesolver @@ -875,7 +869,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20230816175749-20395bffdf26 +# github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed ## explicit; go 1.18 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -908,7 +902,7 @@ github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/parser github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/worker -# github.com/thanos-io/thanos v0.32.3-0.20230911095949-f6a39507b6bd +# github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -948,7 +942,6 @@ github.com/thanos-io/thanos/pkg/store/hintspb github.com/thanos-io/thanos/pkg/store/labelpb github.com/thanos-io/thanos/pkg/store/storepb github.com/thanos-io/thanos/pkg/store/storepb/prompb -github.com/thanos-io/thanos/pkg/stringset github.com/thanos-io/thanos/pkg/strutil github.com/thanos-io/thanos/pkg/targets/targetspb github.com/thanos-io/thanos/pkg/tenancy @@ -1230,7 +1223,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.3.0 ## explicit; go 1.17 golang.org/x/sync/errgroup -# golang.org/x/sys v0.11.0 +# golang.org/x/sys v0.12.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs From 179c0d6625aff11692aee68150e2adfcd47529df Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 22 Sep 2023 13:40:16 -0700 Subject: [PATCH 02/13] retry chunk pool exhaustion error in querier, not in query frontend (#5569) * stop retrying chunk pool exhaustion at query frontend, retry at querier level Signed-off-by: Ben Ye * update integration test Signed-off-by: Ben Ye * refactor Signed-off-by: Ben Ye fix e2e test Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- integration/query_frontend_test.go | 85 ++++++++++++++++++++++ pkg/frontend/transport/retry.go | 39 +++++++++- pkg/frontend/transport/retry_test.go | 23 ++++++ pkg/querier/blocks_store_queryable.go | 81 ++++++++++++++------- pkg/querier/blocks_store_queryable_test.go | 30 ++++++++ pkg/querier/tripperware/query.go | 19 +++++ 6 files changed, 248 insertions(+), 29 deletions(-) diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go index 67d6b5f8c5..02054d117e 100644 --- a/integration/query_frontend_test.go +++ b/integration/query_frontend_test.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/pool" "github.com/cortexproject/cortex/integration/ca" "github.com/cortexproject/cortex/integration/e2e" @@ -436,3 +437,87 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { assertServiceMetricsPrefixes(t, QueryFrontend, queryFrontend) assertServiceMetricsPrefixes(t, QueryScheduler, queryScheduler) } + +func TestQueryFrontendNoRetryChunkPool(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Configure the blocks storage to frequently compact TSDB head + // and ship blocks to the storage. 
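+ // A 1-byte chunk pool (max-chunk-pool-bytes below) is also configured so that any chunk fetch + // on the store-gateway exhausts the pool and surfaces pool.ErrPoolExhausted to the querier.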
+ flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.bucket-store.max-chunk-pool-bytes": "1", + }) + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Start Cortex components for the write path. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester)) + + // Wait until the distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + // Push some series to Cortex. + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + seriesTimestamp := time.Now() + series2Timestamp := seriesTimestamp.Add(blockRangePeriod * 2) + series1, _ := generateSeries("series_1", seriesTimestamp, prompb.Label{Name: "job", Value: "test"}) + series2, _ := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "job", Value: "test"}) + + res, err := c.Push(series1) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + res, err = c.Push(series2) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // Wait until the TSDB head is compacted and shipped to the storage. + // The shipped block contains the 1st series, while the 2nd series is in the head. + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_shipper_uploads_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(2), "cortex_ingester_memory_series_created_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series_removed_total")) + require.NoError(t, ingester.WaitSumMetrics(e2e.Equals(1), "cortex_ingester_memory_series")) + + queryFrontend := e2ecortex.NewQueryFrontendWithConfigFile("query-frontend", "", flags, "") + require.NoError(t, s.Start(queryFrontend)) + + // Start the querier and store-gateway, and configure them to frequently sync blocks fast enough to trigger the consistency check.
+ storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-blocks-storage.bucket-store.sync-interval": "5s", + }), "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-blocks-storage.bucket-store.sync-interval": "5s", + "-querier.frontend-address": queryFrontend.NetworkGRPCEndpoint(), + }), "") + require.NoError(t, s.StartAndWaitReady(querier, storeGateway)) + + // Wait until the querier and store-gateway have updated the ring, and wait until the blocks are old enough for the consistency check. + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512*2), "cortex_ring_tokens_total")) + require.NoError(t, storeGateway.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, querier.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(4), []string{"cortex_querier_blocks_scan_duration_seconds"}, e2e.WithMetricCount)) + + // Query back the series. + c, err = e2ecortex.NewClient("", queryFrontend.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + // We expect the request to hit chunk pool exhaustion. + resp, body, err := c.QueryRaw(`{job="test"}`, series2Timestamp) + require.NoError(t, err) + require.Equal(t, http.StatusInternalServerError, resp.StatusCode) + require.Contains(t, string(body), pool.ErrPoolExhausted.Error()) + // We shouldn't see any retries. + require.NoError(t, queryFrontend.WaitSumMetricsWithOptions(e2e.Equals(0), []string{"cortex_query_frontend_retries"}, e2e.WaitMissingMetrics)) +} diff --git a/pkg/frontend/transport/retry.go b/pkg/frontend/transport/retry.go index bf010745ac..bf1b4faa1c 100644 --- a/pkg/frontend/transport/retry.go +++ b/pkg/frontend/transport/retry.go @@ -2,10 +2,16 @@ package transport import ( "context" + "errors" + "strings" + "unsafe" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/pool" "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/querier/tripperware" ) type Retry struct { @@ -44,13 +50,38 @@ func (r *Retry) Do(ctx context.Context, f func() (*httpgrpc.HTTPResponse, error) } resp, err = f() - if err != nil && err != context.Canceled { + if err != nil && !errors.Is(err, context.Canceled) { continue // Retryable } else if resp != nil && resp.Code/100 == 5 { - continue // Retryable - } else { - break + // This is not that efficient as we might decode the body multiple + // times. But the error response shouldn't be too large, so we should be fine. + // TODO: investigate ways to decode only once. + body, err := tripperware.BodyBufferFromHTTPGRPCResponse(resp, nil) + if err != nil { + return nil, err + } + + if tries < r.maxRetries-1 && isBodyRetryable(yoloString(body)) { + continue + } + + return resp, nil } + break + } + if err != nil { + return nil, err } + return resp, err } + +func isBodyRetryable(body string) bool { + // If the pool is exhausted, retrying at the query frontend might make things worse. + // Rely on retries at the querier level only.
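+ // In practice, a 5xx response whose body contains pool.ErrPoolExhausted's message is + // returned to the caller immediately rather than retried.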
+ return !strings.Contains(body, pool.ErrPoolExhausted.Error()) +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} diff --git a/pkg/frontend/transport/retry_test.go b/pkg/frontend/transport/retry_test.go index a79c083640..3b8ead1a89 100644 --- a/pkg/frontend/transport/retry_test.go +++ b/pkg/frontend/transport/retry_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/pool" "github.com/weaveworks/common/httpgrpc" "go.uber.org/atomic" ) @@ -29,3 +30,25 @@ func TestRetry(t *testing.T) { require.NoError(t, err) require.Equal(t, int32(200), res.Code) } + +func TestNoRetryOnChunkPoolExhaustion(t *testing.T) { + tries := atomic.NewInt64(3) + r := NewRetry(3, nil) + ctx := context.Background() + res, err := r.Do(ctx, func() (*httpgrpc.HTTPResponse, error) { + try := tries.Dec() + if try > 1 { + return &httpgrpc.HTTPResponse{ + Code: 500, + Body: []byte(pool.ErrPoolExhausted.Error()), + }, nil + } + return &httpgrpc.HTTPResponse{ + Code: 200, + }, nil + + }) + + require.NoError(t, err) + require.Equal(t, int32(500), res.Code) +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index b3e7f9073c..f7dac096c7 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -23,6 +23,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/extprom" + "github.com/thanos-io/thanos/pkg/pool" "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/strutil" @@ -46,6 +47,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/math" + "github.com/cortexproject/cortex/pkg/util/multierror" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" @@ -341,10 +343,10 @@ func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, convertedMatchers = convertMatchersToLabelMatcher(matchers) ) - queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - nameSets, warnings, queriedBlocks, err := q.fetchLabelNamesFromStore(spanCtx, clients, minT, maxT, convertedMatchers) + queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { + nameSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelNamesFromStore(spanCtx, clients, minT, maxT, convertedMatchers) if err != nil { - return nil, err + return nil, err, retryableError } resMtx.Lock() @@ -352,7 +354,7 @@ func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, resWarnings = append(resWarnings, warnings...) resMtx.Unlock() - return queriedBlocks, nil + return queriedBlocks, nil, retryableError } err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) @@ -376,10 +378,10 @@ func (q *blocksStoreQuerier) LabelValues(name string, matchers ...*labels.Matche resultMtx sync.Mutex ) - queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - valueSets, warnings, queriedBlocks, err := q.fetchLabelValuesFromStore(spanCtx, name, clients, minT, maxT, matchers...) 
+ queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { + valueSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelValuesFromStore(spanCtx, name, clients, minT, maxT, matchers...) if err != nil { - return nil, err + return nil, err, retryableError } resultMtx.Lock() @@ -387,7 +389,7 @@ func (q *blocksStoreQuerier) LabelValues(name string, matchers ...*labels.Matche resWarnings = append(resWarnings, warnings...) resultMtx.Unlock() - return queriedBlocks, nil + return queriedBlocks, nil, retryableError } err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) @@ -421,11 +423,10 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* resultMtx sync.Mutex ) - queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, maxChunksLimit, leftChunksLimit) + queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { + seriesSets, queriedBlocks, warnings, numChunks, err, retryableError := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, maxChunksLimit, leftChunksLimit) if err != nil { - - return nil, err + return nil, err, retryableError } resultMtx.Lock() @@ -440,7 +441,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } resultMtx.Unlock() - return queriedBlocks, nil + return queriedBlocks, nil, retryableError } err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) @@ -458,7 +459,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, - queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error)) error { + queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This // optimization is particularly important for the blocks storage because can be used to skip @@ -501,6 +502,9 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg resQueriedBlocks = []ulid.ULID(nil) attemptedBlocksZones = make(map[ulid.ULID]map[string]int, len(remainingBlocks)) + + queriedBlocks []ulid.ULID + retryableError error ) for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ { @@ -521,7 +525,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg // Fetch series from stores. If an error occur we do not retry because retries // are only meant to cover missing blocks. - queriedBlocks, err := queryFunc(clients, minT, maxT) + queriedBlocks, err, retryableError = queryFunc(clients, minT, maxT) if err != nil { return err } @@ -553,6 +557,12 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg remainingBlocks = missingBlocks } + // After we exhausted retries, if retryable error is not nil return the retryable error. + // It can be helpful to know whether we need to retry more or not. + if retryableError != nil { + return retryableError + } + // We've not been able to query all expected blocks after all retries. 
level.Warn(util_log.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) return fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) @@ -567,7 +577,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( matchers []*labels.Matcher, maxChunksLimit int, leftChunksLimit int, -) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, int, error) { +) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, int, error, error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -579,11 +589,13 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( spanLog = spanlogger.FromContext(ctx) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) + merrMtx = sync.Mutex{} + merr = multierror.MultiError{} ) matchers, shardingInfo, err := querysharding.ExtractShardingInfo(matchers) if err != nil { - return nil, nil, nil, 0, err + return nil, nil, nil, 0, err, merr.Err() } convertedMatchers := convertMatchersToLabelMatcher(matchers) @@ -614,6 +626,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( if err != nil { if isRetryableError(err) { level.Warn(spanLog).Log("err", errors.Wrapf(err, "failed to fetch series from %s due to retryable error", c.RemoteAddress())) + merrMtx.Lock() + merr.Add(err) + merrMtx.Unlock() return nil } return errors.Wrapf(err, "failed to fetch series from %s", c.RemoteAddress()) @@ -637,6 +652,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( if isRetryableError(err) { level.Warn(spanLog).Log("err", errors.Wrapf(err, "failed to receive series from %s due to retryable error", c.RemoteAddress())) + merrMtx.Lock() + merr.Add(err) + merrMtx.Unlock() return nil } @@ -773,10 +791,10 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Wait until all client requests complete. if err := g.Wait(); err != nil { - return nil, nil, nil, 0, err + return nil, nil, nil, 0, err, merr.Err() } - return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil + return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil, merr.Err() } func (q *blocksStoreQuerier) fetchLabelNamesFromStore( @@ -785,7 +803,7 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( minT int64, maxT int64, matchers []storepb.LabelMatcher, -) ([][]string, storage.Warnings, []ulid.ULID, error) { +) ([][]string, storage.Warnings, []ulid.ULID, error, error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -794,6 +812,8 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( warnings = storage.Warnings(nil) queriedBlocks = []ulid.ULID(nil) spanLog = spanlogger.FromContext(ctx) + merrMtx = sync.Mutex{} + merr = multierror.MultiError{} ) // Concurrently fetch series from all clients. @@ -812,6 +832,9 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( if err != nil { if isRetryableError(err) { level.Warn(spanLog).Log("err", errors.Wrapf(err, "failed to fetch label names from %s due to retryable error", c.RemoteAddress())) + merrMtx.Lock() + merr.Add(err) + merrMtx.Unlock() return nil } @@ -868,10 +891,10 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( // Wait until all client requests complete. 
if err := g.Wait(); err != nil { - return nil, nil, nil, err + return nil, nil, nil, err, merr.Err() } - return nameSets, warnings, queriedBlocks, nil + return nameSets, warnings, queriedBlocks, nil, merr.Err() } func (q *blocksStoreQuerier) fetchLabelValuesFromStore( @@ -881,7 +904,7 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( minT int64, maxT int64, matchers ...*labels.Matcher, -) ([][]string, storage.Warnings, []ulid.ULID, error) { +) ([][]string, storage.Warnings, []ulid.ULID, error, error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -890,6 +913,8 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( warnings = storage.Warnings(nil) queriedBlocks = []ulid.ULID(nil) spanLog = spanlogger.FromContext(ctx) + merrMtx = sync.Mutex{} + merr = multierror.MultiError{} ) // Concurrently fetch series from all clients. @@ -908,6 +933,9 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( if err != nil { if isRetryableError(err) { level.Warn(spanLog).Log("err", errors.Wrapf(err, "failed to fetch label values from %s due to retryable error", c.RemoteAddress())) + merrMtx.Lock() + merr.Add(err) + merrMtx.Unlock() return nil } @@ -967,10 +995,10 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( // Wait until all client requests complete. if err := g.Wait(); err != nil { - return nil, nil, nil, err + return nil, nil, nil, err, merr.Err() } - return valueSets, warnings, queriedBlocks, nil + return valueSets, warnings, queriedBlocks, nil, merr.Err() } func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, shardingInfo *storepb.ShardInfo, skipChunks bool, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { @@ -1126,6 +1154,9 @@ func isRetryableError(err error) bool { // https://github.com/grpc/grpc-go/blob/03172006f5d168fc646d87928d85cb9c4a480291/clientconn.go#L67 case codes.Canceled: return strings.Contains(err.Error(), "grpc: the client connection is closing") + case codes.Unknown: + // Catch chunks pool exhaustion error only. 
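+ // The store gateway surfaces chunk pool exhaustion as a gRPC Unknown error, so the + // error message text is the only signal available to match on.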
+ return strings.Contains(err.Error(), pool.ErrPoolExhausted.Error()) default: return false } } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index a01d4f2893..23114f2663 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/thanos-io/promql-engine/engine" "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/thanos/pkg/pool" "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -668,6 +669,35 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "multiple store-gateways have the block, but one of them fails to return due to chunk pool exhaustion": { + finderResult: bucketindex.Blocks{ + {ID: block1}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{ + remoteAddr: "1.1.1.1", + mockedSeriesErr: status.Error(codes.Unknown, pool.ErrPoolExhausted.Error()), + }: {block1}, + }, + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, minT, 2), + mockHintsResponse(block1), + }}: {block1}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, v: 2}, + }, + }, + }, + }, "all store-gateways return PermissionDenied": { finderResult: bucketindex.Blocks{ {ID: block1}, diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 42de413e52..f893d20b66 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -231,6 +231,25 @@ func BodyBuffer(res *http.Response, logger log.Logger) ([]byte, error) { return buf.Bytes(), nil } +func BodyBufferFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logger) ([]byte, error) { + // If the response is gzipped, unzip it here. + headers := http.Header{} + for _, h := range res.Headers { + headers[h.Key] = h.Values + } + if strings.EqualFold(headers.Get("Content-Encoding"), "gzip") { + gReader, err := gzip.NewReader(bytes.NewBuffer(res.Body)) + if err != nil { + return nil, err + } + defer runutil.CloseWithLogOnErr(logger, gReader, "close gzip reader") + + return io.ReadAll(gReader) + } + + return res.Body, nil +} + func StatsMerge(stats map[int64]*PrometheusResponseQueryableSamplesStatsPerStep) *PrometheusResponseStats { keys := make([]int64, 0, len(stats)) for key := range stats { From 189e6c5fcd03912bc602268f645bfdda42ae21b5 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Fri, 22 Sep 2023 15:08:51 -0700 Subject: [PATCH 03/13] Do not encrypt deletion marker with CMK key (#5575) Signed-off-by: Alan Protasio --- CHANGELOG.md | 1 + pkg/compactor/blocks_cleaner.go | 2 +- pkg/compactor/blocks_cleaner_test.go | 4 ++-- pkg/ingester/ingester_test.go | 4 ++-- pkg/purger/tenant_deletion_api.go | 2 +- pkg/storage/tsdb/tenant_deletion_mark.go | 7 +++---- pkg/storage/tsdb/tenant_deletion_mark_test.go | 16 ++++++++++++++-- 7 files changed, 24 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd6df786cf..edb6250b5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ * [CHANGE] Bucket Index: Add `series_max_size` and
`chunk_max_size` to bucket index. #5489 * [CHANGE] StoreGateway: Rename `cortex_bucket_store_chunk_pool_returned_bytes_total` and `cortex_bucket_store_chunk_pool_requested_bytes_total` to `cortex_bucket_store_chunk_pool_operation_bytes_total`. #5552 * [CHANGE] Query Frontend/Querier: Make build info API disabled by default and add feature flag `api.build-info-enabled` to enable it. #5533 +* [CHANGE] Purger: Do not use the S3 tenant KMS key when uploading the deletion marker. #5575 * [FEATURE] Store Gateway: Add `max_downloaded_bytes_per_request` to limit max bytes to download per store gateway request. * [FEATURE] Added 2 flags `-alertmanager.alertmanager-client.grpc-max-send-msg-size` and ` -alertmanager.alertmanager-client.grpc-max-recv-msg-size` to configure alert manager grpc client message size limits. #5338 * [FEATURE] Query Frontend: Add `cortex_rejected_queries_total` metric for throttled queries. #5356 diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index c6523df3f3..071c91d559 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -277,7 +277,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userID if deletedBlocks > 0 || mark.FinishedTime == 0 { level.Info(userLogger).Log("msg", "updating finished time in tenant deletion mark") mark.FinishedTime = time.Now().Unix() - return errors.Wrap(cortex_tsdb.WriteTenantDeletionMark(ctx, c.bucketClient, userID, c.cfgProvider, mark), "failed to update tenant deletion mark") + return errors.Wrap(cortex_tsdb.WriteTenantDeletionMark(ctx, c.bucketClient, userID, mark), "failed to update tenant deletion mark") } if time.Since(time.Unix(mark.FinishedTime, 0)) < c.cfg.TenantCleanupDelay { diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 6391e243bb..14e496d17f 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -142,14 +142,14 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions createDeletionMark(t, bucketClient, "user-2", block7, now.Add(-deletionDelay).Add(-time.Hour)) // Block reached the deletion threshold. // Blocks for user-3, marked for deletion. - require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-3", nil, tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-3", tsdb.NewTenantDeletionMark(time.Now()))) block9 := createTSDBBlock(t, bucketClient, "user-3", 10, 30, nil) block10 := createTSDBBlock(t, bucketClient, "user-3", 30, 50, nil) // User-4 with no more blocks, but couple of mark and debug files. Should be fully deleted. user4Mark := tsdb.NewTenantDeletionMark(time.Now()) user4Mark.FinishedTime = time.Now().Unix() - 60 // Set to check final user cleanup.
- require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-4", nil, user4Mark)) + require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-4", user4Mark)) user4DebugMetaFile := path.Join("user-4", block.DebugMetas, "meta.json") require.NoError(t, bucketClient.Upload(context.Background(), user4DebugMetaFile, strings.NewReader("some random content here"))) diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 2b6f7e0201..fce1c60b1c 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -2731,7 +2731,7 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) numObjects := len(bucket.Objects()) require.NotZero(t, numObjects) - require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), bucket, userID, nil, cortex_tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), bucket, userID, cortex_tsdb.NewTenantDeletionMark(time.Now()))) numObjects++ // For deletion marker db := i.getTSDB(userID) @@ -2763,7 +2763,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin bucket := objstore.NewInMemBucket() // Write tenant deletion mark. - require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), bucket, userID, nil, cortex_tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), bucket, userID, cortex_tsdb.NewTenantDeletionMark(time.Now()))) i.TSDBState.bucket = bucket require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) diff --git a/pkg/purger/tenant_deletion_api.go b/pkg/purger/tenant_deletion_api.go index 331066fd71..a80b9c0f7c 100644 --- a/pkg/purger/tenant_deletion_api.go +++ b/pkg/purger/tenant_deletion_api.go @@ -52,7 +52,7 @@ func (api *TenantDeletionAPI) DeleteTenant(w http.ResponseWriter, r *http.Reques return } - err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, api.cfgProvider, cortex_tsdb.NewTenantDeletionMark(time.Now())) + err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, cortex_tsdb.NewTenantDeletionMark(time.Now())) if err != nil { level.Error(api.logger).Log("msg", "failed to write tenant deletion mark", "user", userID, "err", err) diff --git a/pkg/storage/tsdb/tenant_deletion_mark.go b/pkg/storage/tsdb/tenant_deletion_mark.go index 88c5c57b59..b4b8022ed0 100644 --- a/pkg/storage/tsdb/tenant_deletion_mark.go +++ b/pkg/storage/tsdb/tenant_deletion_mark.go @@ -11,7 +11,6 @@ import ( "github.com/pkg/errors" "github.com/thanos-io/objstore" - "github.com/cortexproject/cortex/pkg/storage/bucket" util_log "github.com/cortexproject/cortex/pkg/util/log" ) @@ -38,15 +37,15 @@ func TenantDeletionMarkExists(ctx context.Context, bkt objstore.BucketReader, us } // Uploads deletion mark to the tenant location in the bucket. 
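+// NOTE: the mark is now uploaded with the raw bucket client and the user ID is joined into the object path explicitly, so per-tenant SSE-KMS (CMK) settings do not apply to it.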
-func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID string, cfgProvider bucket.TenantConfigProvider, mark *TenantDeletionMark) error { - bkt = bucket.NewUserBucketClient(userID, bkt, cfgProvider) +func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID string, mark *TenantDeletionMark) error { + markerFile := path.Join(userID, TenantDeletionMarkPath) data, err := json.Marshal(mark) if err != nil { return errors.Wrap(err, "serialize tenant deletion mark") } - return errors.Wrap(bkt.Upload(ctx, TenantDeletionMarkPath, bytes.NewReader(data)), "upload tenant deletion mark") + return errors.Wrap(bkt.Upload(ctx, markerFile, bytes.NewReader(data)), "upload tenant deletion mark") } // Returns tenant deletion mark for given user, if it exists. If it doesn't exist, returns nil mark, and no error. diff --git a/pkg/storage/tsdb/tenant_deletion_mark_test.go b/pkg/storage/tsdb/tenant_deletion_mark_test.go index e46d54c8ca..1d3ced41a4 100644 --- a/pkg/storage/tsdb/tenant_deletion_mark_test.go +++ b/pkg/storage/tsdb/tenant_deletion_mark_test.go @@ -13,8 +13,9 @@ func TestTenantDeletionMarkExists(t *testing.T) { const username = "user" for name, tc := range map[string]struct { - objects map[string][]byte - exists bool + objects map[string][]byte + exists bool + deletedUsers []string }{ "empty": { objects: nil, @@ -35,6 +36,13 @@ func TestTenantDeletionMarkExists(t *testing.T) { }, exists: true, }, + "mark exists - upload via WriteTenantDeletionMark": { + objects: map[string][]byte{ + "user/01EQK4QKFHVSZYVJ908Y7HH9E0/meta.json": []byte("data"), + }, + deletedUsers: []string{"user"}, + exists: true, + }, } { t.Run(name, func(t *testing.T) { bkt := objstore.NewInMemBucket() @@ -43,6 +51,10 @@ func TestTenantDeletionMarkExists(t *testing.T) { require.NoError(t, bkt.Upload(context.Background(), objName, bytes.NewReader(data))) } + for _, user := range tc.deletedUsers { + require.NoError(t, WriteTenantDeletionMark(context.Background(), bkt, user, &TenantDeletionMark{})) + } + res, err := TenantDeletionMarkExists(context.Background(), bkt, username) require.NoError(t, err) require.Equal(t, tc.exists, res) From 2a7fdb0322983a6fbcb01a71b160a11a34fd3c6b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 26 Sep 2023 10:53:31 -0700 Subject: [PATCH 04/13] Update thanos to latest main (#5580) * update thanos to latest main Signed-off-by: Ben Ye * update changelog Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- CHANGELOG.md | 3 +- go.mod | 2 +- go.sum | 4 +-- pkg/storegateway/bucket_store_metrics.go | 7 ++++ pkg/storegateway/bucket_store_metrics_test.go | 20 +++++++++++ .../pkg/block/indexheader/binary_reader.go | 28 +++++++++------ .../thanos-io/thanos/pkg/store/bucket.go | 7 ++++ .../thanos-io/thanos/pkg/store/cache/cache.go | 6 ++++ .../thanos/pkg/store/cache/inmemory.go | 9 +++++ .../thanos/pkg/store/cache/memcached.go | 34 ++++++++++++++----- vendor/modules.txt | 2 +- 11 files changed, 97 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index edb6250b5d..b3752cf625 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,7 +36,8 @@ * [FEATURE] Ruler: Support for filtering rules in the API. #5417 * [FEATURE] Compactor: Add `-compactor.ring.tokens-file-path` to store generated tokens locally. #5432 * [FEATURE] Query Frontend: Add `-frontend.retry-on-too-many-outstanding-requests` to re-enqueue 429 requests if there are multiple query-schedulers available. 
#5496 -* [FEATURE] Store Gateway: Add `-blocks-storage.bucket-store.max-inflight-requests`for store gateways to reject further requests upon reaching the limit. #5553 +* [FEATURE] Store Gateway: Add `-blocks-storage.bucket-store.max-inflight-requests` for store gateways to reject further requests upon reaching the limit. #5553 +* [FEATURE] Store Gateway: Add `cortex_bucket_store_block_load_duration_seconds` histogram to track time to load blocks. #5580 * [ENHANCEMENT] Distributor/Ingester: Add span on push path #5319 * [ENHANCEMENT] Support object storage backends for runtime configuration file. #5292 * [ENHANCEMENT] Query Frontend: Reject subquery with too small step size. #5323 diff --git a/go.mod b/go.mod index 2043387dfb..db3e9e7014 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e - github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 + github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.9 diff --git a/go.sum b/go.sum index c94e572657..f77d8bb131 100644 --- a/go.sum +++ b/go.sum @@ -1212,8 +1212,8 @@ github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed h1:iWQdY3S6DpWj github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e h1:kwsFCU8eSkZehbrAN3nXPw5RdMHi/Bok/y8l2C4M+gk= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e/go.mod h1:+T/ZYNCGybT6eTsGGvVtGb63nT1cvUmH6MjqRrcQoKw= -github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 h1:T9Vot+BQao6M6j8F0JQbseAqtniOw1Csz+QHRRRwF48= -github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= +github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f h1:OdZZLgF2eYIiad7h4WeUPkew7Uq6F9vFPg3aDZfMQLY= +github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/pkg/storegateway/bucket_store_metrics.go b/pkg/storegateway/bucket_store_metrics.go index cb1cf1152b..f351940bcf 100644 --- a/pkg/storegateway/bucket_store_metrics.go +++ b/pkg/storegateway/bucket_store_metrics.go @@ -16,6 +16,7 @@ type BucketStoreMetrics struct { blockLoadFailures *prometheus.Desc blockDrops *prometheus.Desc blockDropFailures *prometheus.Desc + blockLoadDuration *prometheus.Desc blocksLoaded *prometheus.Desc seriesDataTouched *prometheus.Desc seriesDataFetched *prometheus.Desc @@ -75,6 +76,10 @@ func NewBucketStoreMetrics() *BucketStoreMetrics { "cortex_bucket_store_block_drop_failures_total", "Total number of local blocks that failed to be dropped.", nil, nil), + blockLoadDuration: prometheus.NewDesc( + "cortex_bucket_store_block_load_duration_seconds", + "The total time taken to load a block in seconds.", + nil, nil), blocksLoaded: prometheus.NewDesc( "cortex_bucket_store_blocks_loaded", 
"Number of currently loaded blocks.", @@ -228,6 +233,7 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.blockLoadFailures out <- m.blockDrops out <- m.blockDropFailures + out <- m.blockLoadDuration out <- m.blocksLoaded out <- m.seriesDataTouched out <- m.seriesDataFetched @@ -274,6 +280,7 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCounters(out, m.blockLoadFailures, "thanos_bucket_store_block_load_failures_total") data.SendSumOfCounters(out, m.blockDrops, "thanos_bucket_store_block_drops_total") data.SendSumOfCounters(out, m.blockDropFailures, "thanos_bucket_store_block_drop_failures_total") + data.SendSumOfHistograms(out, m.blockLoadDuration, "thanos_bucket_store_block_load_duration_seconds") data.SendSumOfGaugesPerUser(out, m.blocksLoaded, "thanos_bucket_store_blocks_loaded") diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go index 37bccc1d57..650a015a49 100644 --- a/pkg/storegateway/bucket_store_metrics_test.go +++ b/pkg/storegateway/bucket_store_metrics_test.go @@ -41,6 +41,19 @@ func TestBucketStoreMetrics(t *testing.T) { # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. # TYPE cortex_bucket_store_block_drops_total counter cortex_bucket_store_block_drops_total 90076 + # HELP cortex_bucket_store_block_load_duration_seconds The total time taken to load a block in seconds. + # TYPE cortex_bucket_store_block_load_duration_seconds histogram + cortex_bucket_store_block_load_duration_seconds_bucket{le="0.1"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="0.5"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="1"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="10"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="20"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="30"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="60"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="120"} 0 + cortex_bucket_store_block_load_duration_seconds_bucket{le="+Inf"} 3 + cortex_bucket_store_block_load_duration_seconds_sum 112595 + cortex_bucket_store_block_load_duration_seconds_count 3 # HELP cortex_bucket_store_block_drop_failures_total Total number of local blocks that failed to be dropped. 
# TYPE cortex_bucket_store_block_drop_failures_total counter @@ -601,6 +614,7 @@ func populateMockedBucketStoreMetrics(base float64) *prometheus.Registry { m.blockLoadFailures.Add(3 * base) m.blockDrops.Add(4 * base) m.blockDropFailures.Add(5 * base) + m.blockLoadDuration.Observe(5 * base) m.seriesDataTouched.WithLabelValues("touched-a").Observe(6 * base) m.seriesDataTouched.WithLabelValues("touched-b").Observe(7 * base) m.seriesDataTouched.WithLabelValues("touched-c").Observe(8 * base) @@ -684,6 +698,7 @@ type mockedBucketStoreMetrics struct { blockLoadFailures prometheus.Counter blockDrops prometheus.Counter blockDropFailures prometheus.Counter + blockLoadDuration prometheus.Histogram seriesDataTouched *prometheus.HistogramVec seriesDataFetched *prometheus.HistogramVec seriesDataSizeTouched *prometheus.HistogramVec @@ -741,6 +756,11 @@ func newMockedBucketStoreMetrics(reg prometheus.Registerer) *mockedBucketStoreMe Name: "thanos_bucket_store_block_drop_failures_total", Help: "Total number of local blocks that failed to be dropped.", }) + m.blockLoadDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_block_load_duration_seconds", + Help: "The total time taken to load a block in seconds.", + Buckets: []float64{0.1, 0.5, 1, 10, 20, 30, 60, 120}, + }) m.blocksLoaded = promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "thanos_bucket_store_blocks_loaded", Help: "Number of currently loaded blocks.", diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 16ef73ac3b..7dbed1bec2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -505,7 +505,8 @@ type BinaryReader struct { postingsV1 map[string]map[string]index.Range // Symbols struct that keeps only 1/postingOffsetsInMemSampling in the memory, then looks up the rest via mmap. - symbols *index.Symbols + // Use Symbols as interface for ease of testing. + symbols Symbols // Cache of the label name symbol lookups, // as there are not many and they are half of all lookups. nameSymbols map[uint32]string @@ -925,6 +926,16 @@ func (r *BinaryReader) postingsOffset(name string, values ...string) ([]index.Ra } func (r *BinaryReader) LookupSymbol(o uint32) (string, error) { + if r.indexVersion == index.FormatV1 { + // For v1 little trick is needed. Refs are actual offset inside index, not index-header. This is different + // of the header length difference between two files. + o += headerLen - index.HeaderLen + } + + if s, ok := r.nameSymbols[o]; ok { + return s, nil + } + cacheIndex := o % valueSymbolsCacheSize r.valueSymbolsMx.Lock() if cached := r.valueSymbols[cacheIndex]; cached.index == o && cached.symbol != "" { @@ -934,16 +945,6 @@ func (r *BinaryReader) LookupSymbol(o uint32) (string, error) { } r.valueSymbolsMx.Unlock() - if s, ok := r.nameSymbols[o]; ok { - return s, nil - } - - if r.indexVersion == index.FormatV1 { - // For v1 little trick is needed. Refs are actual offset inside index, not index-header. This is different - // of the header length difference between two files. 
- o += headerLen - index.HeaderLen - } - s, err := r.symbols.Lookup(o) if err != nil { return s, err @@ -1047,3 +1048,8 @@ func (b realByteSlice) Range(start, end int) []byte { func (b realByteSlice) Sub(start, end int) index.ByteSlice { return b[start:end] } + +type Symbols interface { + Lookup(o uint32) (string, error) + ReverseLookup(sym string) (uint32, error) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 5a6f31c42d..bc1507a367 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -122,6 +122,7 @@ type bucketStoreMetrics struct { lastLoadedBlock prometheus.Gauge blockDrops prometheus.Counter blockDropFailures prometheus.Counter + blockLoadDuration prometheus.Histogram seriesDataTouched *prometheus.HistogramVec seriesDataFetched *prometheus.HistogramVec seriesDataSizeTouched *prometheus.HistogramVec @@ -185,6 +186,11 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Name: "thanos_bucket_store_blocks_last_loaded_timestamp_seconds", Help: "Timestamp when last block got loaded.", }) + m.blockLoadDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_block_load_duration_seconds", + Help: "The total time taken to load a block in seconds.", + Buckets: []float64{0.1, 0.5, 1, 10, 20, 30, 60, 120}, + }) m.seriesDataTouched = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_data_touched", @@ -727,6 +733,7 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er level.Warn(s.logger).Log("msg", "loading block failed", "elapsed", time.Since(start), "id", meta.ULID, "err", err) } else { level.Info(s.logger).Log("msg", "loaded new block", "elapsed", time.Since(start), "id", meta.ULID) + s.metrics.blockLoadDuration.Observe(time.Since(start).Seconds()) } }() s.metrics.blockLoads.Inc() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go index 87cdb17d96..360cdd67e5 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go @@ -61,6 +61,7 @@ type commonMetrics struct { requestTotal *prometheus.CounterVec hitsTotal *prometheus.CounterVec dataSizeBytes *prometheus.HistogramVec + fetchLatency *prometheus.HistogramVec } func newCommonMetrics(reg prometheus.Registerer) *commonMetrics { @@ -80,6 +81,11 @@ func newCommonMetrics(reg prometheus.Registerer) *commonMetrics { 32, 256, 512, 1024, 32 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024, 32 * 1024 * 1024, 64 * 1024 * 1024, 128 * 1024 * 1024, 256 * 1024 * 1024, 512 * 1024 * 1024, }, }, []string{"item_type"}), + fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Name: "thanos_store_index_cache_fetch_duration_seconds", + Help: "Histogram to track latency to fetch items from index cache", + Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 30, 45, 60, 90, 120}, + }, []string{"item_type"}), } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index 747199b414..e0077acc35 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -302,6 +302,9 @@ func (c *InMemoryIndexCache) 
StorePostings(blockID ulid.ULID, l labels.Label, v // FetchMultiPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. func (c *InMemoryIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypePostings)) + defer timer.ObserveDuration() + hits = map[labels.Label][]byte{} blockIDKey := blockID.String() @@ -325,6 +328,9 @@ func (c *InMemoryIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers [ // FetchExpandedPostings fetches expanded postings and returns cached data and a boolean value representing whether it is a cache hit or not. func (c *InMemoryIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher) ([]byte, bool) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeExpandedPostings)) + defer timer.ObserveDuration() + if b, ok := c.get(cacheTypeExpandedPostings, cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(matchers)), ""}); ok { return b, true } @@ -341,6 +347,9 @@ func (c *InMemoryIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef // FetchMultiSeries fetches multiple series - each identified by ID - from the cache // and returns a map containing cache hits, along with a list of missing IDs. func (c *InMemoryIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeSeries)) + defer timer.ObserveDuration() + hits = map[storage.SeriesRef][]byte{} blockIDKey := blockID.String() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go index 9292f3ed59..a3dbce9940 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go @@ -33,15 +33,18 @@ type RemoteIndexCache struct { compressionScheme string // Metrics. - postingRequests prometheus.Counter - seriesRequests prometheus.Counter - expandedPostingRequests prometheus.Counter - postingHits prometheus.Counter - seriesHits prometheus.Counter - expandedPostingHits prometheus.Counter - postingDataSizeBytes prometheus.Observer - expandedPostingDataSizeBytes prometheus.Observer - seriesDataSizeBytes prometheus.Observer + postingRequests prometheus.Counter + seriesRequests prometheus.Counter + expandedPostingRequests prometheus.Counter + postingHits prometheus.Counter + seriesHits prometheus.Counter + expandedPostingHits prometheus.Counter + postingDataSizeBytes prometheus.Observer + expandedPostingDataSizeBytes prometheus.Observer + seriesDataSizeBytes prometheus.Observer + postingsFetchDuration prometheus.Observer + expandedPostingsFetchDuration prometheus.Observer + seriesFetchDuration prometheus.Observer } // NewRemoteIndexCache makes a new RemoteIndexCache. 
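For context on the metric wiring in these hunks: every Fetch* path keeps a pre-labelled duration observer and wraps the lookup in a prometheus.Timer. Below is a minimal, runnable sketch of that pattern, assuming the metric name and buckets from the diff; the registry setup and the simulated fetch are illustrative stand-ins, not code from this patch.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Same shape as the fetchLatency histogram added in cache.go above.
	fetchLatency := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
		Name:    "thanos_store_index_cache_fetch_duration_seconds",
		Help:    "Histogram to track latency to fetch items from index cache",
		Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 30, 45, 60, 90, 120},
	}, []string{"item_type"})

	// One pre-labelled observer per item type keeps the label lookup off the hot path.
	postingsFetchDuration := fetchLatency.WithLabelValues("Postings")

	// Each Fetch* method starts a timer and observes on return, matching the
	// `timer := prometheus.NewTimer(...)` / `defer timer.ObserveDuration()` pairs below.
	timer := prometheus.NewTimer(postingsFetchDuration)
	time.Sleep(5 * time.Millisecond) // stand-in for the real cache fetch (assumption)
	fmt.Printf("observed %.3fs\n", timer.ObserveDuration().Seconds())
}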
@@ -68,6 +71,10 @@ func NewRemoteIndexCache(logger log.Logger, cacheClient cacheutil.RemoteCacheCli c.seriesDataSizeBytes = commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeSeries) c.expandedPostingDataSizeBytes = commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeExpandedPostings) + c.postingsFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypePostings) + c.seriesFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypeSeries) + c.expandedPostingsFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypeExpandedPostings) + level.Info(logger).Log("msg", "created index cache") return c, nil @@ -88,6 +95,9 @@ func (c *RemoteIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v [] // and returns a map containing cache hits, along with a list of missing keys. // In case of error, it logs and return an empty cache hits map. func (c *RemoteIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, lbls []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { + timer := prometheus.NewTimer(c.postingsFetchDuration) + defer timer.ObserveDuration() + keys := make([]string, 0, len(lbls)) blockIDKey := blockID.String() @@ -138,6 +148,9 @@ func (c *RemoteIndexCache) StoreExpandedPostings(blockID ulid.ULID, keys []*labe // and returns a map containing cache hits, along with a list of missing keys. // In case of error, it logs and return an empty cache hits map. func (c *RemoteIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, lbls []*labels.Matcher) ([]byte, bool) { + timer := prometheus.NewTimer(c.postingsFetchDuration) + defer timer.ObserveDuration() + key := cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(lbls)), c.compressionScheme}.string() // Fetch the keys from memcached in a single request. @@ -169,6 +182,9 @@ func (c *RemoteIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, // and returns a map containing cache hits, along with a list of missing IDs. // In case of error, it logs and return an empty cache hits map. 
func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { + timer := prometheus.NewTimer(c.postingsFetchDuration) + defer timer.ObserveDuration() + keys := make([]string, 0, len(ids)) blockIDKey := blockID.String() diff --git a/vendor/modules.txt b/vendor/modules.txt index e92ea755dc..d3cc541a84 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -902,7 +902,7 @@ github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/parser github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/worker -# github.com/thanos-io/thanos v0.32.4-0.20230921182036-6257767ec9d0 +# github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader From cbcf0390f01d41220f32b379b4bd5376d8d370cf Mon Sep 17 00:00:00 2001 From: Justin Jung Date: Tue, 26 Sep 2023 10:55:19 -0700 Subject: [PATCH 05/13] Check context before notifying frontend and scheduler (#5565) * Check context before notifying frontend and scheduler Signed-off-by: Justin Jung * Add changelog Signed-off-by: Justin Jung --------- Signed-off-by: Justin Jung --- CHANGELOG.md | 1 + pkg/querier/worker/scheduler_processor.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3752cf625..dbdc6f3865 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ * [ENHANCEMENT] All: Handling CMK Access Denied errors. #5420 #5542 * [ENHANCEMENT] Querier: Retry store gateway client connection closing gRPC error. #5558 * [ENHANCEMENT] QueryFrontend: Add generic retry for all APIs. #5561. +* [ENHANCEMENT] Querier: Check context before notifying scheduler and frontend. #5565 * [ENHANCEMENT] QueryFrontend: Add metric for number of series requests. #5373 * [ENHANCEMENT] Store Gateway: Add histogram metrics for total time spent fetching series and chunks per request. #5573 * [BUGFIX] Ruler: Validate if rule group can be safely converted back to rule group yaml from protobuf message #5265 diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index d4e549e1a9..7ee7419064 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -158,6 +158,10 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer logger := util_log.WithContext(ctx, sp.log) sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.StatsEnabled, request.HttpRequest) + if err = ctx.Err(); err != nil { + return + } + // Report back to scheduler that processing of the query has finished. if err := c.Send(&schedulerpb.QuerierToScheduler{}); err != nil { level.Error(logger).Log("msg", "error notifying scheduler about finished query", "err", err, "addr", address) @@ -187,6 +191,10 @@ func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, level.Info(logger).Log("msg", "finished request", "status_code", response.Code, "response_size", len(response.GetBody())) } + if err = ctx.Err(); err != nil { + return + } + // Ensure responses that are too big are not retried. 
if len(response.Body) >= sp.maxMessageSize { level.Error(logger).Log("msg", "response larger than max message size", "size", len(response.Body), "maxMessageSize", sp.maxMessageSize) From dc3807ce840ed43a81a6f965ef7d2f84d1cb56b0 Mon Sep 17 00:00:00 2001 From: Wen Xu Date: Sun, 1 Oct 2023 22:19:11 +0000 Subject: [PATCH 06/13] add context timeout for waitInstanceState call for alertmanager and s… (#5581) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGELOG.md | 1 + docs/blocks-storage/store-gateway.md | 4 ++++ docs/configuration/config-file-reference.md | 8 ++++++++ pkg/alertmanager/alertmanager_ring.go | 6 +++++- pkg/alertmanager/multitenant.go | 10 ++++++++-- pkg/storegateway/gateway.go | 10 ++++++++-- pkg/storegateway/gateway_ring.go | 4 ++++ 7 files changed, 38 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbdc6f3865..ed4bc24eca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ * [BUGFIX] DDBKV: When no change detected in ring, retry the CAS until there is change. #5502 * [BUGFIX] Fix bug on objstore when configured to use S3 fips endpoints. #5540 * [BUGFIX] Ruler: Fix bug on ruler where a failure to load a single RuleGroup would prevent rulers to sync all RuleGroup. #5563 +* [BUGFIX] Store-Gateway and AlertManager: Add a `wait_instance_state_timeout` to the WaitInstanceState context to avoid waiting forever. #5581 ## 1.15.1 2023-04-26 diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index d407806542..77f7546761 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -309,6 +309,10 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.wait-stability-max-duration [wait_stability_max_duration: | default = 5m] + # Timeout for waiting on store-gateway to reach the desired state in the ring. + # CLI flag: -store-gateway.sharding-ring.wait-instance-state-timeout + [wait_instance_state_timeout: | default = 10m] + # The sleep seconds when store-gateway is shutting down. Need to be close to # or larger than KV Store information propagation delay # CLI flag: -store-gateway.sharding-ring.final-sleep diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 0fb7777c30..6bb6d06ca7 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -378,6 +378,10 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.final-sleep [final_sleep: | default = 0s] + # Timeout for waiting on alertmanager to reach the desired state in the ring. + # CLI flag: -alertmanager.sharding-ring.wait-instance-state-timeout + [wait_instance_state_timeout: | default = 10m] + # Name of network interface to read address from. # CLI flag: -alertmanager.sharding-ring.instance-interface-names [instance_interface_names: | default = [eth0 en0]] @@ -4867,6 +4871,10 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.wait-stability-max-duration [wait_stability_max_duration: | default = 5m] + # Timeout for waiting on store-gateway to reach the desired state in the ring. + # CLI flag: -store-gateway.sharding-ring.wait-instance-state-timeout + [wait_instance_state_timeout: | default = 10m] + # The sleep seconds when store-gateway is shutting down.
Need to be close to # or larger than KV Store information propagation delay # CLI flag: -store-gateway.sharding-ring.final-sleep diff --git a/pkg/alertmanager/alertmanager_ring.go b/pkg/alertmanager/alertmanager_ring.go index dc26f6a4db..cdb52b5ae8 100644 --- a/pkg/alertmanager/alertmanager_ring.go +++ b/pkg/alertmanager/alertmanager_ring.go @@ -49,7 +49,8 @@ type RingConfig struct { ReplicationFactor int `yaml:"replication_factor"` ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - FinalSleep time.Duration `yaml:"final_sleep"` + FinalSleep time.Duration `yaml:"final_sleep"` + WaitInstanceStateTimeout time.Duration `yaml:"wait_instance_state_timeout"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` @@ -94,6 +95,9 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.InstanceZone, rfprefix+"instance-availability-zone", "", "The availability zone where this instance is running. Required if zone-awareness is enabled.") cfg.RingCheckPeriod = 5 * time.Second + + // Timeout durations + f.DurationVar(&cfg.WaitInstanceStateTimeout, rfprefix+"wait-instance-state-timeout", 10*time.Minute, "Timeout for waiting on alertmanager to reach the desired state in the ring.") } // ToLifecyclerConfig returns a LifecyclerConfig based on the alertmanager diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 8ce007abc1..49a3e401ad 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -486,7 +486,10 @@ func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) { // We wait until the instance is in the JOINING state, once it does we know that tokens are assigned to this instance and we'll be ready to perform an initial sync of configs. level.Info(am.logger).Log("msg", "waiting until alertmanager is JOINING in the ring") - if err = ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, am.cfg.ShardingRing.WaitInstanceStateTimeout) + defer cancel() + if err = ring.WaitInstanceState(ctxWithTimeout, am.ring, am.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { + level.Error(am.logger).Log("msg", "alertmanager failed to become JOINING in the ring", "err", err) return err } level.Info(am.logger).Log("msg", "alertmanager is JOINING in the ring") @@ -519,7 +522,10 @@ func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) { // Wait until the ring client detected this instance in the ACTIVE state. level.Info(am.logger).Log("msg", "waiting until alertmanager is ACTIVE in the ring") - if err := ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, am.cfg.ShardingRing.WaitInstanceStateTimeout) + defer cancel() + if err := ring.WaitInstanceState(ctxWithTimeout, am.ring, am.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { + level.Error(am.logger).Log("msg", "alertmanager failed to become ACTIVE in the ring", "err", err) return err } level.Info(am.logger).Log("msg", "alertmanager is ACTIVE in the ring") diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index fe99a32fa1..536a7f2556 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -244,7 +244,10 @@ func (g *StoreGateway) starting(ctx context.Context) (err error) { // make sure that when we'll run the initial sync we already know the tokens // assigned to this instance.
level.Info(g.logger).Log("msg", "waiting until store-gateway is JOINING in the ring") - if err := ring.WaitInstanceState(ctx, g.ring, g.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, g.gatewayCfg.ShardingRing.WaitInstanceStateTimeout) + defer cancel() + if err := ring.WaitInstanceState(ctxWithTimeout, g.ring, g.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { + level.Error(g.logger).Log("msg", "store-gateway failed to become JOINING in the ring", "err", err) return err } level.Info(g.logger).Log("msg", "store-gateway is JOINING in the ring") @@ -285,7 +288,10 @@ func (g *StoreGateway) starting(ctx context.Context) (err error) { // make sure that when we'll run the loop it won't be detected as a ring // topology change. level.Info(g.logger).Log("msg", "waiting until store-gateway is ACTIVE in the ring") - if err := ring.WaitInstanceState(ctx, g.ring, g.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, g.gatewayCfg.ShardingRing.WaitInstanceStateTimeout) + defer cancel() + if err := ring.WaitInstanceState(ctxWithTimeout, g.ring, g.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { + level.Error(g.logger).Log("msg", "store-gateway failed to become ACTIVE in the ring", "err", err) return err } level.Info(g.logger).Log("msg", "store-gateway is ACTIVE in the ring") diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index 06d2836835..8965c32f95 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -72,6 +72,7 @@ type RingConfig struct { // Wait ring stability. WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"` + WaitInstanceStateTimeout time.Duration `yaml:"wait_instance_state_timeout"` FinalSleep time.Duration `yaml:"final_sleep"` @@ -123,6 +124,9 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Defaults for internal settings. cfg.RingCheckPeriod = 5 * time.Second + + // Timeout durations + f.DurationVar(&cfg.WaitInstanceStateTimeout, ringFlagsPrefix+"wait-instance-state-timeout", 10*time.Minute, "Timeout for waiting on store-gateway to reach the desired state in the ring.") } func (cfg *RingConfig) ToRingConfig() ring.Config { From 7812330a0965385db3433aa64218b18be2a2b9af Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 2 Oct 2023 08:30:57 -0700 Subject: [PATCH 07/13] Expose series batch size flag (#5582) * expose series batch size flag Signed-off-by: Ben Ye update docs Signed-off-by: Ben Ye * changelog Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- CHANGELOG.md | 3 ++- docs/blocks-storage/querier.md | 5 +++++ docs/blocks-storage/store-gateway.md | 5 +++++ docs/configuration/config-file-reference.md | 5 +++++ go.mod | 2 +- go.sum | 4 ++-- pkg/storage/tsdb/config.go | 4 ++++ pkg/storegateway/bucket_stores.go | 2 +- vendor/github.com/thanos-io/thanos/pkg/store/bucket.go | 2 +- .../github.com/thanos-io/thanos/pkg/store/cache/memcached.go | 4 ++-- vendor/modules.txt | 2 +- 11 files changed, 29 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed4bc24eca..b91b4c0b6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,8 @@ * [FEATURE] AlertManager: Add support for Webex, Discord and Telegram Receiver.
#5493 * [FEATURE] Ingester: added `-admin-limit-message` to customize the message contained in limit errors.#5460 * [FEATURE] AlertManager: Update version to v0.26.0 and bring in Microsoft Teams receiver. #5543 -* [FEATURE] Store Gateway: Support lazy expanded posting optimization. Added new flag `"blocks-storage.bucket-store.lazy-expanded-postings-enabled` and new metrics `cortex_bucket_store_lazy_expanded_postings_total`, `cortex_bucket_store_lazy_expanded_posting_size_bytes_total` and `cortex_bucket_store_lazy_expanded_posting_series_overfetched_size_bytes_total`. #5556. +* [FEATURE] Store Gateway: Support lazy expanded posting optimization. Added new flag `blocks-storage.bucket-store.lazy-expanded-postings-enabled` and new metrics `cortex_bucket_store_lazy_expanded_postings_total`, `cortex_bucket_store_lazy_expanded_posting_size_bytes_total` and `cortex_bucket_store_lazy_expanded_posting_series_overfetched_size_bytes_total`. #5556. +* [FEATURE] Store Gateway: Added new flag `blocks-storage.bucket-store.series-batch-size` to control how many series to fetch per batch in Store Gateway. #5582. * [CHANGE] AlertManager: include reason label in cortex_alertmanager_notifications_failed_total.#5409 * [CHANGE] Query: Set CORS Origin headers for Query API #5388 * [CHANGE] Updating prometheus/alertmanager from v0.25.0 to v0.25.1-0.20230505130626-263ca5c9438e. This includes the below changes. #5276 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 76ecf8a179..b7283df8aa 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -1111,6 +1111,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.lazy-expanded-postings-enabled [lazy_expanded_postings_enabled: | default = false] + # Controls how many series to fetch per batch in Store Gateway. Default + # value is 10000. + # CLI flag: -blocks-storage.bucket-store.series-batch-size + [series_batch_size: | default = 10000] + tsdb: # Local directory to store TSDBs in the ingesters. # CLI flag: -blocks-storage.tsdb.dir diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 77f7546761..82c30a121e 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -1218,6 +1218,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.lazy-expanded-postings-enabled [lazy_expanded_postings_enabled: | default = false] + # Controls how many series to fetch per batch in Store Gateway. Default + # value is 10000. + # CLI flag: -blocks-storage.bucket-store.series-batch-size + [series_batch_size: | default = 10000] + tsdb: # Local directory to store TSDBs in the ingesters. # CLI flag: -blocks-storage.tsdb.dir diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 6bb6d06ca7..71fce349af 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -1657,6 +1657,11 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.lazy-expanded-postings-enabled [lazy_expanded_postings_enabled: | default = false] + # Controls how many series to fetch per batch in Store Gateway. Default value + # is 10000. + # CLI flag: -blocks-storage.bucket-store.series-batch-size + [series_batch_size: | default = 10000] + tsdb: # Local directory to store TSDBs in the ingesters. 
# CLI flag: -blocks-storage.tsdb.dir diff --git a/go.mod b/go.mod index db3e9e7014..9716fb4769 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e - github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f + github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.9 diff --git a/go.sum b/go.sum index f77d8bb131..d901a4ad3a 100644 --- a/go.sum +++ b/go.sum @@ -1212,8 +1212,8 @@ github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed h1:iWQdY3S6DpWj github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e h1:kwsFCU8eSkZehbrAN3nXPw5RdMHi/Bok/y8l2C4M+gk= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e/go.mod h1:+T/ZYNCGybT6eTsGGvVtGb63nT1cvUmH6MjqRrcQoKw= -github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f h1:OdZZLgF2eYIiad7h4WeUPkew7Uq6F9vFPg3aDZfMQLY= -github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= +github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 h1:ekD3P1XF0Hlg/u7rSNqdyLhwYE4W4RGfkMDudtepRL8= +github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index e8af5e1c41..ebbd0b2886 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -280,6 +280,9 @@ type BucketStoreConfig struct { // On the contrary, smaller value will increase baseline memory usage, but improve latency slightly. // 1 will keep all in memory. Default value is the same as in Prometheus which gives a good balance. PostingOffsetsInMemSampling int `yaml:"postings_offsets_in_mem_sampling" doc:"hidden"` + + // Controls how many series to fetch per batch in Store Gateway. Default value is 10000. + SeriesBatchSize int `yaml:"series_batch_size"` } // RegisterFlags registers the BucketStore flags @@ -311,6 +314,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { f.Uint64Var(&cfg.EstimatedMaxSeriesSizeBytes, "blocks-storage.bucket-store.estimated-max-series-size-bytes", store.EstimatedMaxSeriesSize, "Estimated max series size in bytes. Setting a large value might result in over fetching data while a small value might result in data refetch. Default value is 64KB.") f.Uint64Var(&cfg.EstimatedMaxChunkSizeBytes, "blocks-storage.bucket-store.estimated-max-chunk-size-bytes", store.EstimatedMaxChunkSize, "Estimated max chunk size in bytes. Setting a large value might result in over fetching data while a small value might result in data refetch. 
Default value is 16KiB.") f.BoolVar(&cfg.LazyExpandedPostingsEnabled, "blocks-storage.bucket-store.lazy-expanded-postings-enabled", false, "If true, Store Gateway will estimate postings size and try to lazily expand postings if it downloads less data than expanding all postings.") + f.IntVar(&cfg.SeriesBatchSize, "blocks-storage.bucket-store.series-batch-size", store.SeriesBatchSize, "Controls how many series to fetch per batch in Store Gateway. Default value is 10000.") } // Validate the config. diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index d7c709c4ec..e5630a2c1a 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -572,7 +572,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro store.WithIndexCache(u.indexCache), store.WithQueryGate(u.queryGate), store.WithChunkPool(u.chunksPool), - store.WithSeriesBatchSize(store.SeriesBatchSize), + store.WithSeriesBatchSize(u.cfg.BucketStore.SeriesBatchSize), store.WithBlockEstimatedMaxChunkFunc(func(m thanos_metadata.Meta) uint64 { if m.Thanos.IndexStats.ChunkMaxSize > 0 && uint64(m.Thanos.IndexStats.ChunkMaxSize) < u.cfg.BucketStore.EstimatedMaxChunkSizeBytes { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index bc1507a367..2e40d010a3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -1124,7 +1124,7 @@ func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { func (b *blockSeriesClient) nextBatch() error { start := b.i - end := start + SeriesBatchSize + end := start + uint64(b.batchSize) if end > uint64(len(b.lazyPostings.postings)) { end = uint64(len(b.lazyPostings.postings)) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go index a3dbce9940..104b936e8c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go @@ -148,7 +148,7 @@ func (c *RemoteIndexCache) StoreExpandedPostings(blockID ulid.ULID, keys []*labe // and returns a map containing cache hits, along with a list of missing keys. // In case of error, it logs and return an empty cache hits map. func (c *RemoteIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, lbls []*labels.Matcher) ([]byte, bool) { - timer := prometheus.NewTimer(c.postingsFetchDuration) + timer := prometheus.NewTimer(c.expandedPostingsFetchDuration) defer timer.ObserveDuration() key := cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(lbls)), c.compressionScheme}.string() @@ -182,7 +182,7 @@ func (c *RemoteIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, // and returns a map containing cache hits, along with a list of missing IDs. // In case of error, it logs and return an empty cache hits map. 
func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { - timer := prometheus.NewTimer(c.postingsFetchDuration) + timer := prometheus.NewTimer(c.seriesFetchDuration) defer timer.ObserveDuration() keys := make([]string, 0, len(ids)) diff --git a/vendor/modules.txt b/vendor/modules.txt index d3cc541a84..b74cc59f1f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -902,7 +902,7 @@ github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/parser github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/worker -# github.com/thanos-io/thanos v0.32.4-0.20230926060504-20d29008068f +# github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader From 577045c7a265a0f961585268a1d0b2744ba67caf Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Fri, 6 Oct 2023 10:55:28 -0700 Subject: [PATCH 08/13] Support filtered index cache (#5587) * filtered cache support for Cortex Signed-off-by: Ben Ye * update thanos version Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- docs/blocks-storage/querier.md | 15 + docs/blocks-storage/store-gateway.md | 15 + docs/configuration/config-file-reference.md | 15 + go.mod | 2 +- go.sum | 4 +- pkg/storage/tsdb/index_cache.go | 75 ++++- pkg/storage/tsdb/index_cache_test.go | 40 ++- pkg/storage/tsdb/multilevel_cache.go | 30 +- pkg/storage/tsdb/multilevel_cache_test.go | 48 +-- .../pkg/block/indexheader/binary_reader.go | 7 +- .../pkg/block/indexheader/parallel_bucket.go | 231 ++++++++++++++ .../thanos/pkg/promclient/promclient.go | 19 +- .../thanos-io/thanos/pkg/store/bucket.go | 293 +++++++++--------- .../thanos-io/thanos/pkg/store/cache/cache.go | 22 +- .../thanos/pkg/store/cache/factory.go | 11 + .../thanos/pkg/store/cache/filter_cache.go | 88 ++++++ .../thanos/pkg/store/cache/inmemory.go | 49 +-- .../thanos/pkg/store/cache/memcached.go | 82 +++-- .../thanos/pkg/store/lazy_postings.go | 7 +- .../thanos-io/thanos/pkg/tenancy/tenancy.go | 2 + vendor/modules.txt | 2 +- 21 files changed, 772 insertions(+), 285 deletions(-) create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/indexheader/parallel_bucket.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/cache/filter_cache.go diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index b7283df8aa..2e9ec58de4 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -536,6 +536,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.max-size-bytes [max_size_bytes: | default = 1073741824] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.enabled-items + [enabled_items: | default = []] + memcached: # Comma separated list of memcached addresses. Supported prefixes are: # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV @@ -583,6 +588,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + # Selectively cache index item types. 
Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.enabled-items + [enabled_items: | default = []] + redis: # Comma separated list of redis addresses. Supported prefixes are: dns+ # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, @@ -679,6 +689,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size [cache_size: | default = 0] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.enabled-items + [enabled_items: | default = []] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 82c30a121e..7162d34b93 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -643,6 +643,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.max-size-bytes [max_size_bytes: | default = 1073741824] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.enabled-items + [enabled_items: | default = []] + memcached: # Comma separated list of memcached addresses. Supported prefixes are: # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV @@ -690,6 +695,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.enabled-items + [enabled_items: | default = []] + redis: # Comma separated list of redis addresses. Supported prefixes are: dns+ # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, @@ -786,6 +796,11 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size [cache_size: | default = 0] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.enabled-items + [enabled_items: | default = []] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 71fce349af..5103df9a38 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -1083,6 +1083,11 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.max-size-bytes [max_size_bytes: | default = 1073741824] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.inmemory.enabled-items + [enabled_items: | default = []] + memcached: # Comma separated list of memcached addresses. Supported prefixes are: # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, @@ -1130,6 +1135,11 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.auto-discovery [auto_discovery: | default = false] + # Selectively cache index item types. 
Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.memcached.enabled-items + [enabled_items: | default = []] + redis: # Comma separated list of redis addresses. Supported prefixes are: dns+ # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, @@ -1226,6 +1236,11 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.index-cache.redis.cache-size [cache_size: | default = 0] + # Selectively cache index item types. Supported values are Postings, + # ExpandedPostings and Series + # CLI flag: -blocks-storage.bucket-store.index-cache.redis.enabled-items + [enabled_items: | default = []] + chunks_cache: # Backend for chunks cache, if not empty. Supported values: memcached. # CLI flag: -blocks-storage.bucket-store.chunks-cache.backend diff --git a/go.mod b/go.mod index 9716fb4769..0136803094 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e - github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 + github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.9 diff --git a/go.sum b/go.sum index d901a4ad3a..caf94052e2 100644 --- a/go.sum +++ b/go.sum @@ -1212,8 +1212,8 @@ github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed h1:iWQdY3S6DpWj github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e h1:kwsFCU8eSkZehbrAN3nXPw5RdMHi/Bok/y8l2C4M+gk= github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e/go.mod h1:+T/ZYNCGybT6eTsGGvVtGb63nT1cvUmH6MjqRrcQoKw= -github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 h1:ekD3P1XF0Hlg/u7rSNqdyLhwYE4W4RGfkMDudtepRL8= -github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3/go.mod h1:Px5Boq60s+2WwR+V4v4oxgmxfw9WHrwMwjRou6pkUNw= +github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275 h1:y2YPqM1XiBw7EhLg45F6A1g8bgt4yYxkaRAeQaNLWYk= +github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275/go.mod h1:HwiHn7u6GeES403BTACOYib/JKAJknf8dByU/uJiEr0= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index f9a51580b3..796f5a291a 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -14,6 +14,7 @@ import ( storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" ) const ( @@ -41,10 +42,10 @@ var ( ) type IndexCacheConfig struct { - Backend string `yaml:"backend"` - InMemory InMemoryIndexCacheConfig `yaml:"inmemory"` - Memcached MemcachedClientConfig `yaml:"memcached"` - Redis RedisClientConfig `yaml:"redis"` + Backend string `yaml:"backend"` + InMemory InMemoryIndexCacheConfig `yaml:"inmemory"` + Memcached MemcachedIndexCacheConfig `yaml:"memcached"` + Redis RedisIndexCacheConfig 
`yaml:"redis"` } func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { @@ -85,6 +86,10 @@ func (cfg *IndexCacheConfig) Validate() error { if err := cfg.Redis.Validate(); err != nil { return err } + } else { + if err := cfg.InMemory.Validate(); err != nil { + return err + } } configuredBackends[backend] = struct{}{} @@ -94,17 +99,63 @@ func (cfg *IndexCacheConfig) Validate() error { } type InMemoryIndexCacheConfig struct { - MaxSizeBytes uint64 `yaml:"max_size_bytes"` + MaxSizeBytes uint64 `yaml:"max_size_bytes"` + EnabledItems []string `yaml:"enabled_items"` +} + +func (cfg *InMemoryIndexCacheConfig) Validate() error { + if err := storecache.ValidateEnabledItems(cfg.EnabledItems); err != nil { + return err + } + return nil } func (cfg *InMemoryIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.Uint64Var(&cfg.MaxSizeBytes, prefix+"max-size-bytes", uint64(1*units.Gibibyte), "Maximum size in bytes of in-memory index cache used to speed up blocks index lookups (shared between all tenants).") + f.Var((*flagext.StringSlice)(&cfg.EnabledItems), prefix+"enabled-items", "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series") +} + +type MemcachedIndexCacheConfig struct { + ClientConfig MemcachedClientConfig `yaml:",inline"` + EnabledItems []string `yaml:"enabled_items"` +} + +func (cfg *MemcachedIndexCacheConfig) Validate() error { + if err := cfg.ClientConfig.Validate(); err != nil { + return err + } + return storecache.ValidateEnabledItems(cfg.EnabledItems) +} + +func (cfg *MemcachedIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + cfg.ClientConfig.RegisterFlagsWithPrefix(f, prefix) + f.Var((*flagext.StringSlice)(&cfg.EnabledItems), prefix+"enabled-items", "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series") +} + +type RedisIndexCacheConfig struct { + ClientConfig RedisClientConfig `yaml:",inline"` + EnabledItems []string `yaml:"enabled_items"` +} + +func (cfg *RedisIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + cfg.ClientConfig.RegisterFlagsWithPrefix(f, prefix) + f.Var((*flagext.StringSlice)(&cfg.EnabledItems), prefix+"enabled-items", "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series") +} + +func (cfg *RedisIndexCacheConfig) Validate() error { + if err := cfg.ClientConfig.Validate(); err != nil { + return err + } + return storecache.ValidateEnabledItems(cfg.EnabledItems) } // NewIndexCache creates a new index cache based on the input configuration. 
func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { splitBackends := strings.Split(cfg.Backend, ",") - var caches []storecache.IndexCache + var ( + caches []storecache.IndexCache + enabledItems []string + ) for i, backend := range splitBackends { iReg := registerer @@ -121,8 +172,9 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu return c, err } caches = append(caches, c) + enabledItems = cfg.InMemory.EnabledItems case IndexCacheBackendMemcached: - c, err := newMemcachedIndexCacheClient(cfg.Memcached, logger, registerer) + c, err := newMemcachedIndexCacheClient(cfg.Memcached.ClientConfig, logger, registerer) if err != nil { return nil, err } @@ -131,8 +183,9 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu return nil, err } caches = append(caches, cache) + enabledItems = cfg.Memcached.EnabledItems case IndexCacheBackendRedis: - c, err := newRedisIndexCacheClient(cfg.Redis, logger, iReg) + c, err := newRedisIndexCacheClient(cfg.Redis.ClientConfig, logger, iReg) if err != nil { return nil, err } @@ -141,9 +194,15 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu return nil, err } caches = append(caches, cache) + enabledItems = cfg.Redis.EnabledItems default: return nil, errUnsupportedIndexCacheBackend } + if len(enabledItems) > 0 { + latestCache := caches[len(caches)-1] + cache := storecache.NewFilteredIndexCache(latestCache, enabledItems) + caches[len(caches)-1] = cache + } } return newMultiLevelCache(caches...), nil diff --git a/pkg/storage/tsdb/index_cache_test.go b/pkg/storage/tsdb/index_cache_test.go index c5617f04d9..b0112c9c18 100644 --- a/pkg/storage/tsdb/index_cache_test.go +++ b/pkg/storage/tsdb/index_cache_test.go @@ -1,6 +1,7 @@ package tsdb import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -35,11 +36,46 @@ func TestIndexCacheConfig_Validate(t *testing.T) { "one memcached address should pass": { cfg: IndexCacheConfig{ Backend: "memcached", - Memcached: MemcachedClientConfig{ - Addresses: "dns+localhost:11211", + Memcached: MemcachedIndexCacheConfig{ + ClientConfig: MemcachedClientConfig{ + Addresses: "dns+localhost:11211", + }, }, }, }, + "invalid enabled items memcached": { + cfg: IndexCacheConfig{ + Backend: "memcached", + Memcached: MemcachedIndexCacheConfig{ + ClientConfig: MemcachedClientConfig{ + Addresses: "dns+localhost:11211", + }, + EnabledItems: []string{"foo", "bar"}, + }, + }, + expected: fmt.Errorf("unsupported item type foo"), + }, + "invalid enabled items inmemory": { + cfg: IndexCacheConfig{ + Backend: "inmemory", + InMemory: InMemoryIndexCacheConfig{ + EnabledItems: []string{"foo", "bar"}, + }, + }, + expected: fmt.Errorf("unsupported item type foo"), + }, + "invalid enabled items redis": { + cfg: IndexCacheConfig{ + Backend: "redis", + Redis: RedisIndexCacheConfig{ + ClientConfig: RedisClientConfig{ + Addresses: "test", + }, + EnabledItems: []string{"foo", "bar"}, + }, + }, + expected: fmt.Errorf("unsupported item type foo"), + }, } for testName, testData := range tests { diff --git a/pkg/storage/tsdb/multilevel_cache.go b/pkg/storage/tsdb/multilevel_cache.go index 5283eedd66..032ae745a8 100644 --- a/pkg/storage/tsdb/multilevel_cache.go +++ b/pkg/storage/tsdb/multilevel_cache.go @@ -14,26 +14,26 @@ type multiLevelCache struct { caches []storecache.IndexCache } -func (m *multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte) { +func (m 
*multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { wg := sync.WaitGroup{} wg.Add(len(m.caches)) for _, c := range m.caches { cache := c go func() { defer wg.Done() - cache.StorePostings(blockID, l, v) + cache.StorePostings(blockID, l, v, tenant) }() } wg.Wait() } -func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { +func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { misses = keys hits = map[labels.Label][]byte{} backfillMap := map[storecache.IndexCache][]map[labels.Label][]byte{} for i, c := range m.caches { backfillMap[c] = []map[labels.Label][]byte{} - h, mi := c.FetchMultiPostings(ctx, blockID, misses) + h, mi := c.FetchMultiPostings(ctx, blockID, misses, tenant) misses = mi for label, bytes := range h { @@ -53,7 +53,7 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U for cache, hit := range backfillMap { for _, values := range hit { for l, b := range values { - cache.StorePostings(blockID, l, b) + cache.StorePostings(blockID, l, b, tenant) } } } @@ -62,24 +62,24 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U return hits, misses } -func (m *multiLevelCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte) { +func (m *multiLevelCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { wg := sync.WaitGroup{} wg.Add(len(m.caches)) for _, c := range m.caches { cache := c go func() { defer wg.Done() - cache.StoreExpandedPostings(blockID, matchers, v) + cache.StoreExpandedPostings(blockID, matchers, v, tenant) }() } wg.Wait() } -func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher) ([]byte, bool) { +func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { for i, c := range m.caches { - if d, h := c.FetchExpandedPostings(ctx, blockID, matchers); h { + if d, h := c.FetchExpandedPostings(ctx, blockID, matchers, tenant); h { if i > 0 { - m.caches[i-1].StoreExpandedPostings(blockID, matchers, d) + m.caches[i-1].StoreExpandedPostings(blockID, matchers, d, tenant) } return d, h } @@ -88,27 +88,27 @@ func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID uli return []byte{}, false } -func (m *multiLevelCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte) { +func (m *multiLevelCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { wg := sync.WaitGroup{} wg.Add(len(m.caches)) for _, c := range m.caches { cache := c go func() { defer wg.Done() - cache.StoreSeries(blockID, id, v) + cache.StoreSeries(blockID, id, v, tenant) }() } wg.Wait() } -func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { +func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { misses = ids hits = map[storage.SeriesRef][]byte{} backfillMap := map[storecache.IndexCache][]map[storage.SeriesRef][]byte{} for i, c := range m.caches { backfillMap[c] = 
[]map[storage.SeriesRef][]byte{} - h, miss := c.FetchMultiSeries(ctx, blockID, misses) + h, miss := c.FetchMultiSeries(ctx, blockID, misses, tenant) misses = miss for label, bytes := range h { @@ -128,7 +128,7 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI for cache, hit := range backfillMap { for _, values := range hit { for m, b := range values { - cache.StoreSeries(blockID, m, b) + cache.StoreSeries(blockID, m, b, tenant) } } } diff --git a/pkg/storage/tsdb/multilevel_cache_test.go b/pkg/storage/tsdb/multilevel_cache_test.go index 749da9da5b..93956d8063 100644 --- a/pkg/storage/tsdb/multilevel_cache_test.go +++ b/pkg/storage/tsdb/multilevel_cache_test.go @@ -37,8 +37,10 @@ func Test_MultiIndexCacheInstantiation(t *testing.T) { "instantiate multiples backends - inmemory/redis": { cfg: IndexCacheConfig{ Backend: "inmemory,redis", - Redis: RedisClientConfig{ - Addresses: s.Addr(), + Redis: RedisIndexCacheConfig{ + ClientConfig: RedisClientConfig{ + Addresses: s.Addr(), + }, }, }, expectedType: newMultiLevelCache(), @@ -46,9 +48,11 @@ func Test_MultiIndexCacheInstantiation(t *testing.T) { "instantiate multiples backends - inmemory/memcached": { cfg: IndexCacheConfig{ Backend: "inmemory,memcached", - Memcached: MemcachedClientConfig{ - Addresses: s.Addr(), - MaxAsyncConcurrency: 1000, + Memcached: MemcachedIndexCacheConfig{ + ClientConfig: MemcachedClientConfig{ + Addresses: s.Addr(), + MaxAsyncConcurrency: 1000, + }, }, }, expectedType: newMultiLevelCache(), @@ -112,7 +116,7 @@ func Test_MultiLevelCache(t *testing.T) { "StorePostings": {{bID, l1, v}}, }, call: func(cache storecache.IndexCache) { - cache.StorePostings(bID, l1, v) + cache.StorePostings(bID, l1, v, "") }, }, "[StoreSeries] Should store on all caches": { @@ -123,7 +127,7 @@ func Test_MultiLevelCache(t *testing.T) { "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, call: func(cache storecache.IndexCache) { - cache.StoreSeries(bID, 1, v) + cache.StoreSeries(bID, 1, v, "") }, }, "[StoreExpandedPostings] Should store on all caches": { @@ -134,7 +138,7 @@ func Test_MultiLevelCache(t *testing.T) { "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, call: func(cache storecache.IndexCache) { - cache.StoreExpandedPostings(bID, []*labels.Matcher{matcher}, v) + cache.StoreExpandedPostings(bID, []*labels.Matcher{matcher}, v, "") }, }, "[FetchMultiPostings] Should fallback when all misses": { @@ -145,7 +149,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}) + cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}, "") }, }, "[FetchMultiPostings] should fallback and backfill only the missing keys on l1": { @@ -163,7 +167,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiPostings": {map[labels.Label][]byte{l2: v}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}) + cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}, "") }, }, "[FetchMultiPostings] should not fallback when all hit on l1": { @@ -175,7 +179,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1), l2: make([]byte, 1)}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}) + cache.FetchMultiPostings(ctx, bID, []labels.Label{l1, l2}, "") }, }, 
"[FetchMultiSeries] Should fallback when all misses": { @@ -186,7 +190,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}) + cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}, "") }, }, "[FetchMultiSeries] should fallback and backfill only the missing keys on l1": { @@ -204,7 +208,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiSeries": {map[storage.SeriesRef][]byte{2: v}, []storage.SeriesRef{2}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}) + cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}, "") }, }, "[FetchMultiSeries] should not fallback when all hit on l1": { @@ -216,7 +220,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: make([]byte, 1), 2: make([]byte, 1)}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { - cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}) + cache.FetchMultiSeries(ctx, bID, []storage.SeriesRef{1, 2}, "") }, }, "[FetchExpandedPostings] Should fallback and backfill when miss": { @@ -231,7 +235,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchExpandedPostings": {v, true}, }, call: func(cache storecache.IndexCache) { - cache.FetchExpandedPostings(ctx, bID, []*labels.Matcher{matcher}) + cache.FetchExpandedPostings(ctx, bID, []*labels.Matcher{matcher}, "") }, }, "[FetchExpandedPostings] should not fallback when all hit on l1": { @@ -243,7 +247,7 @@ func Test_MultiLevelCache(t *testing.T) { "FetchExpandedPostings": {[]byte{}, true}, }, call: func(cache storecache.IndexCache) { - cache.FetchExpandedPostings(ctx, bID, []*labels.Matcher{matcher}) + cache.FetchExpandedPostings(ctx, bID, []*labels.Matcher{matcher}, "") }, }, } @@ -272,11 +276,11 @@ type mockIndexCache struct { mockedCalls map[string][]interface{} } -func (m *mockIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte) { +func (m *mockIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { m.calls["StorePostings"] = append(m.calls["StorePostings"], []interface{}{blockID, l, v}) } -func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { +func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { m.calls["FetchMultiPostings"] = append(m.calls["FetchMultiPostings"], []interface{}{blockID, keys}) if m, ok := m.mockedCalls["FetchMultiPostings"]; ok { return m[0].(map[labels.Label][]byte), m[1].([]labels.Label) @@ -285,11 +289,11 @@ func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID return map[labels.Label][]byte{}, keys } -func (m *mockIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte) { +func (m *mockIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { m.calls["StoreExpandedPostings"] = append(m.calls["StoreExpandedPostings"], []interface{}{blockID, matchers, v}) } -func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher) ([]byte, bool) { +func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher, 
tenant string) ([]byte, bool) { m.calls["FetchExpandedPostings"] = append(m.calls["FetchExpandedPostings"], []interface{}{blockID, matchers}) if m, ok := m.mockedCalls["FetchExpandedPostings"]; ok { return m[0].([]byte), m[1].(bool) @@ -298,11 +302,11 @@ func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.U return []byte{}, false } -func (m *mockIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte) { +func (m *mockIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { m.calls["StoreSeries"] = append(m.calls["StoreSeries"], []interface{}{blockID, id, v}) } -func (m *mockIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { +func (m *mockIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { m.calls["FetchMultiSeries"] = append(m.calls["FetchMultiSeries"], []interface{}{blockID, ids}) if m, ok := m.mockedCalls["FetchMultiSeries"]; ok { return m[0].(map[storage.SeriesRef][]byte), m[1].([]storage.SeriesRef) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 7dbed1bec2..02c062337e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -74,7 +74,12 @@ type BinaryTOC struct { // WriteBinary build index header from the pieces of index in object storage, and cached in file if necessary. func WriteBinary(ctx context.Context, bkt objstore.BucketReader, id ulid.ULID, filename string) ([]byte, error) { - ir, indexVersion, err := newChunkedIndexReader(ctx, bkt, id) + var tmpDir = "" + if filename != "" { + tmpDir = filepath.Dir(filename) + } + parallelBucket := WrapWithParallel(bkt, tmpDir) + ir, indexVersion, err := newChunkedIndexReader(ctx, parallelBucket, id) if err != nil { return nil, errors.Wrap(err, "new index reader") } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/parallel_bucket.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/parallel_bucket.go new file mode 100644 index 0000000000..6e52022d72 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/parallel_bucket.go @@ -0,0 +1,231 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package indexheader + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/runutil" + "golang.org/x/sync/errgroup" +) + +// partitionSize is used for splitting range reads. +const partitionSize = 16 * 1024 * 1024 // 16 MiB + +type parallelBucketReader struct { + objstore.BucketReader + tmpDir string + partitionSize int64 +} + +func WrapWithParallel(b objstore.BucketReader, tmpDir string) objstore.BucketReader { + return ¶llelBucketReader{ + BucketReader: b, + tmpDir: tmpDir, + partitionSize: partitionSize, + } +} + +// GetRange reads the range in parallel. 
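+// It splits [off, off+length) into partitionSize sub-ranges, fetches each
+// sub-range concurrently through the wrapped BucketReader, and returns a
+// reader that streams the parts back in order. Parts are buffered in files
+// under tmpDir when it is set, otherwise in memory. A hypothetical caller:
+//
+//	rc, err := WrapWithParallel(bkt, os.TempDir()).GetRange(ctx, "index", 0, 64<<20)
+//
+// would fetch four 16 MiB parts in parallel and read them as one stream.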
+func (b *parallelBucketReader) GetRange(ctx context.Context, name string, off int64, length int64) (io.ReadCloser, error) { + partFilePrefix := uuid.New().String() + g, gctx := errgroup.WithContext(ctx) + + numParts := length / b.partitionSize + if length%b.partitionSize > 0 { + // A partial partition is remaining + numParts += 1 + } + + parts := make([]Part, 0, numParts) + + partId := 0 + for o := off; o < off+length; o += b.partitionSize { + l := b.partitionSize + if o+l > off+length { + // Partial partition + l = length - (int64(partId) * b.partitionSize) + } + + partOff := o + partLength := l + part, err := b.createPart(partFilePrefix, partId, int(partLength)) + if err != nil { + return nil, err + } + parts = append(parts, part) + + g.Go(func() error { + rc, err := b.BucketReader.GetRange(gctx, name, partOff, partLength) + defer runutil.CloseWithErrCapture(&err, rc, "close object") + if err != nil { + return errors.Wrap(err, fmt.Sprintf("get range part %v", partId)) + } + if _, err := io.Copy(part, rc); err != nil { + return errors.Wrap(err, fmt.Sprintf("get range part %v", partId)) + } + return part.Flush() + }) + partId += 1 + } + + if err := g.Wait(); err != nil { + return nil, err + } + return newPartMerger(parts), nil +} + +func (b *parallelBucketReader) createPart(partFilePrefix string, partId int, size int) (Part, error) { + if b.tmpDir == "" { + // Parts stored in memory + return newPartBuffer(size), nil + } + + partName := fmt.Sprintf("%s.part-%d", partFilePrefix, partId) + filename := filepath.Join(b.tmpDir, partName) + return newPartFile(filename) +} + +type partMerger struct { + closers []io.Closer + multiReader io.Reader +} + +func newPartMerger(parts []Part) *partMerger { + readers := make([]io.Reader, 0, len(parts)) + closers := make([]io.Closer, 0, len(parts)) + for _, p := range parts { + readers = append(readers, p.(io.Reader)) + closers = append(closers, p.(io.Closer)) + } + return &partMerger{ + closers: closers, + multiReader: io.MultiReader(readers...), + } +} + +func (m *partMerger) Read(b []byte) (n int, err error) { + n, err = m.multiReader.Read(b) + return +} + +func (m *partMerger) Close() (err error) { + var firstErr error = nil + for _, c := range m.closers { + if err := c.Close(); err != nil { + if firstErr == nil { + firstErr = err + } + } + } + return firstErr +} + +type Part interface { + Read(buf []byte) (int, error) + Write(buf []byte) (int, error) + Flush() error +} + +// partFile stores parts in temporary files. 
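+// Each part is written once through a buffered writer; Flush drains the
+// buffer, syncs the file and rewinds it so the merged reader can stream the
+// bytes back, and Close removes the backing file.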
+type partFile struct { + file *os.File + fileWriter *bufio.Writer + fileReader *bufio.Reader +} + +func newPartFile(filename string) (*partFile, error) { + dir := filepath.Dir(filename) + df, err := fileutil.OpenDir(dir) + if os.IsNotExist(err) { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, errors.Wrap(err, "create temp dir") + } + df, err = fileutil.OpenDir(dir) + } + if err != nil { + return nil, errors.Wrap(err, "open temp dir") + } + + if err := df.Sync(); err != nil { + return nil, errors.Wrap(err, "sync dir") + } + + if err := os.RemoveAll(filename); err != nil { + return nil, errors.Wrap(err, "remove existing file") + } + f, err := os.OpenFile(filepath.Clean(filename), os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return nil, errors.Wrap(err, "open temp file") + } + return &partFile{ + file: f, + fileWriter: bufio.NewWriterSize(f, 32*1024), + fileReader: bufio.NewReaderSize(f, 32*1024), + }, nil +} + +func (p *partFile) Close() error { + if err := p.file.Close(); err != nil { + return err + } + return os.Remove(p.file.Name()) +} + +func (p *partFile) Flush() error { + if err := p.fileWriter.Flush(); err != nil { + return err + } + if err := p.file.Sync(); err != nil { + return err + } + // Seek is necessary because the part was just written to. + _, err := p.file.Seek(0, io.SeekStart) + return err +} + +func (p *partFile) Read(buf []byte) (int, error) { + return p.fileReader.Read(buf) +} + +func (p *partFile) Write(buf []byte) (int, error) { + return p.fileWriter.Write(buf) +} + +// partBuffer stores parts in memory. +type partBuffer struct { + buf *bytes.Buffer +} + +func newPartBuffer(size int) *partBuffer { + return &partBuffer{ + buf: bytes.NewBuffer(make([]byte, 0, size)), + } +} + +func (p *partBuffer) Close() error { + return nil +} + +func (p *partBuffer) Read(b []byte) (int, error) { + return p.buf.Read(b) +} + +func (p *partBuffer) Write(b []byte) (int, error) { + return p.buf.Write(b) +} + +func (p *partBuffer) Flush() error { + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index f564fade72..f06e9ce54e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -105,7 +105,7 @@ func NewWithTracingClient(logger log.Logger, httpClient *http.Client, userAgent // req2xx sends a request to the given url.URL. If method is http.MethodPost then // the raw query is encoded in the body and the appropriate Content-Type is set. 
-func (c *Client) req2xx(ctx context.Context, u *url.URL, method string) (_ []byte, _ int, err error) { +func (c *Client) req2xx(ctx context.Context, u *url.URL, method string, headers http.Header) (_ []byte, _ int, err error) { var b io.Reader if method == http.MethodPost { rq := u.RawQuery @@ -117,6 +117,10 @@ func (c *Client) req2xx(ctx context.Context, u *url.URL, method string) (_ []byt if err != nil { return nil, 0, errors.Wrapf(err, "create %s request", method) } + if headers != nil { + req.Header = headers + } + if c.userAgent != "" { req.Header.Set("User-Agent", c.userAgent) } @@ -166,7 +170,7 @@ func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labe span, ctx := tracing.StartSpan(ctx, "/prom_config HTTP[client]") defer span.Finish() - body, _, err := c.req2xx(ctx, &u, http.MethodGet) + body, _, err := c.req2xx(ctx, &u, http.MethodGet, nil) if err != nil { return nil, err } @@ -363,6 +367,7 @@ type QueryOptions struct { MaxSourceResolution string Engine string Explain bool + HTTPHeaders http.Header } func (p *QueryOptions) AddTo(values url.Values) error { @@ -423,7 +428,7 @@ func (c *Client) QueryInstant(ctx context.Context, base *url.URL, query string, method = http.MethodGet } - body, _, err := c.req2xx(ctx, &u, method) + body, _, err := c.req2xx(ctx, &u, method, opts.HTTPHeaders) if err != nil { return nil, nil, nil, errors.Wrap(err, "read query instant response") } @@ -529,7 +534,7 @@ func (c *Client) QueryRange(ctx context.Context, base *url.URL, query string, st span, ctx := tracing.StartSpan(ctx, "/prom_query_range HTTP[client]") defer span.Finish() - body, _, err := c.req2xx(ctx, &u, http.MethodGet) + body, _, err := c.req2xx(ctx, &u, http.MethodGet, opts.HTTPHeaders) if err != nil { return nil, nil, nil, errors.Wrap(err, "read query range response") } @@ -612,7 +617,7 @@ func (c *Client) AlertmanagerAlerts(ctx context.Context, base *url.URL) ([]*mode span, ctx := tracing.StartSpan(ctx, "/alertmanager_alerts HTTP[client]") defer span.Finish() - body, _, err := c.req2xx(ctx, &u, http.MethodGet) + body, _, err := c.req2xx(ctx, &u, http.MethodGet, nil) if err != nil { return nil, err } @@ -643,7 +648,7 @@ func (c *Client) BuildVersion(ctx context.Context, base *url.URL) (string, error defer span.Finish() // We get status code 404 or 405 for prometheus versions lower than 2.14.0 - body, code, err := c.req2xx(ctx, &u, http.MethodGet) + body, code, err := c.req2xx(ctx, &u, http.MethodGet, nil) if err != nil { if code == http.StatusNotFound { return "0", nil @@ -675,7 +680,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string span, ctx := tracing.StartSpan(ctx, spanName) defer span.Finish() - body, code, err := c.req2xx(ctx, u, http.MethodGet) + body, code, err := c.req2xx(ctx, u, http.MethodGet, nil) if err != nil { if code, exists := statusToCode[code]; exists && code != 0 { return status.Error(code, err.Error()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 2e40d010a3..01f32f4366 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -127,16 +127,16 @@ type bucketStoreMetrics struct { seriesDataFetched *prometheus.HistogramVec seriesDataSizeTouched *prometheus.HistogramVec seriesDataSizeFetched *prometheus.HistogramVec - seriesBlocksQueried prometheus.Histogram - seriesGetAllDuration prometheus.Histogram - seriesMergeDuration prometheus.Histogram - 
resultSeriesCount prometheus.Histogram - chunkSizeBytes prometheus.Histogram - postingsSizeBytes prometheus.Histogram + seriesBlocksQueried *prometheus.HistogramVec + seriesGetAllDuration *prometheus.HistogramVec + seriesMergeDuration *prometheus.HistogramVec + resultSeriesCount *prometheus.HistogramVec + chunkSizeBytes *prometheus.HistogramVec + postingsSizeBytes *prometheus.HistogramVec queriesDropped *prometheus.CounterVec - seriesRefetches prometheus.Counter - chunkRefetches prometheus.Counter - emptyPostingCount prometheus.Counter + seriesRefetches *prometheus.CounterVec + chunkRefetches *prometheus.CounterVec + emptyPostingCount *prometheus.CounterVec lazyExpandedPostingsCount prometheus.Counter lazyExpandedPostingSizeBytes prometheus.Counter @@ -145,18 +145,18 @@ type bucketStoreMetrics struct { cachedPostingsCompressions *prometheus.CounterVec cachedPostingsCompressionErrors *prometheus.CounterVec cachedPostingsCompressionTimeSeconds *prometheus.CounterVec - cachedPostingsOriginalSizeBytes prometheus.Counter - cachedPostingsCompressedSizeBytes prometheus.Counter + cachedPostingsOriginalSizeBytes *prometheus.CounterVec + cachedPostingsCompressedSizeBytes *prometheus.CounterVec - seriesFetchDuration prometheus.Histogram + seriesFetchDuration *prometheus.HistogramVec // Counts time for fetching series across all batches. - seriesFetchDurationSum prometheus.Histogram - postingsFetchDuration prometheus.Histogram + seriesFetchDurationSum *prometheus.HistogramVec + postingsFetchDuration *prometheus.HistogramVec // chunkFetchDuration counts total time loading chunks, but since we spawn // multiple goroutines the actual latency is usually much lower than it. - chunkFetchDuration prometheus.Histogram + chunkFetchDuration *prometheus.HistogramVec // Actual absolute total time for loading chunks. 
- chunkFetchDurationSum prometheus.Histogram + chunkFetchDurationSum *prometheus.HistogramVec } func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { @@ -196,138 +196,138 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Name: "thanos_bucket_store_series_data_touched", Help: "Number of items of a data type touched to fulfill a single Store API series request.", Buckets: prometheus.ExponentialBuckets(200, 2, 15), - }, []string{"data_type"}) + }, []string{"data_type", tenancy.MetricLabel}) m.seriesDataFetched = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_data_fetched", Help: "Number of items of a data type retrieved to fulfill a single Store API series request.", Buckets: prometheus.ExponentialBuckets(200, 2, 15), - }, []string{"data_type"}) + }, []string{"data_type", tenancy.MetricLabel}) m.seriesDataSizeTouched = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_data_size_touched_bytes", Help: "Total size of items of a data type touched to fulfill a single Store API series request in Bytes.", Buckets: prometheus.ExponentialBuckets(1024, 2, 15), - }, []string{"data_type"}) + }, []string{"data_type", tenancy.MetricLabel}) m.seriesDataSizeFetched = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_data_size_fetched_bytes", Help: "Total size of items of a data type fetched to fulfill a single Store API series request in Bytes.", Buckets: prometheus.ExponentialBuckets(1024, 2, 15), - }, []string{"data_type"}) + }, []string{"data_type", tenancy.MetricLabel}) - m.seriesBlocksQueried = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.seriesBlocksQueried = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_blocks_queried", Help: "Number of blocks in a bucket store that were touched to satisfy a query.", Buckets: prometheus.ExponentialBuckets(1, 2, 10), - }) - m.seriesGetAllDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + }, []string{tenancy.MetricLabel}) + m.seriesGetAllDuration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_get_all_duration_seconds", Help: "Time it takes until all per-block prepares and loads for a query are finished.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) - m.seriesMergeDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + }, []string{tenancy.MetricLabel}) + m.seriesMergeDuration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_merge_duration_seconds", Help: "Time it takes to merge sub-results from all queried blocks into a single result.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) - m.resultSeriesCount = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + }, []string{tenancy.MetricLabel}) + m.resultSeriesCount = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_result_series", Help: "Number of series observed in the final result of a query.", Buckets: prometheus.ExponentialBuckets(1, 2, 15), - }) + }, []string{tenancy.MetricLabel}) - m.chunkSizeBytes = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.chunkSizeBytes = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_sent_chunk_size_bytes", Help: "Size in 
bytes of the chunks for the single series, which is adequate to the gRPC message size sent to querier.", Buckets: []float64{ 32, 256, 512, 1024, 32 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024, 32 * 1024 * 1024, 256 * 1024 * 1024, 512 * 1024 * 1024, }, - }) + }, []string{tenancy.MetricLabel}) - m.postingsSizeBytes = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.postingsSizeBytes = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_postings_size_bytes", Help: "Size in bytes of the postings for a single series call.", Buckets: []float64{ 32, 256, 512, 1024, 32 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024, 32 * 1024 * 1024, 128 * 1024 * 1024, 256 * 1024 * 1024, 512 * 1024 * 1024, 768 * 1024 * 1024, 1024 * 1024 * 1024, }, - }) + }, []string{tenancy.MetricLabel}) m.queriesDropped = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_queries_dropped_total", Help: "Number of queries that were dropped due to the limit.", - }, []string{"reason"}) - m.seriesRefetches = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + }, []string{"reason", tenancy.MetricLabel}) + m.seriesRefetches = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_series_refetches_total", Help: "Total number of cases where configured estimated series bytes was not enough to fetch series from index, resulting in refetch.", - }) - m.chunkRefetches = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + }, []string{tenancy.MetricLabel}) + m.chunkRefetches = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_chunk_refetches_total", Help: "Total number of cases where configured estimated chunk bytes was not enough to fetch chunks from object store, resulting in refetch.", - }) + }, []string{tenancy.MetricLabel}) m.cachedPostingsCompressions = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_compressions_total", Help: "Number of postings compressions before storing to index cache.", - }, []string{"op"}) - m.cachedPostingsCompressions.WithLabelValues(labelEncode) - m.cachedPostingsCompressions.WithLabelValues(labelDecode) + }, []string{"op", tenancy.MetricLabel}) + m.cachedPostingsCompressions.WithLabelValues(labelEncode, tenancy.DefaultTenant) + m.cachedPostingsCompressions.WithLabelValues(labelDecode, tenancy.DefaultTenant) m.cachedPostingsCompressionErrors = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_compression_errors_total", Help: "Number of postings compression errors.", - }, []string{"op"}) - m.cachedPostingsCompressionErrors.WithLabelValues(labelEncode) - m.cachedPostingsCompressionErrors.WithLabelValues(labelDecode) + }, []string{"op", tenancy.MetricLabel}) + m.cachedPostingsCompressionErrors.WithLabelValues(labelEncode, tenancy.DefaultTenant) + m.cachedPostingsCompressionErrors.WithLabelValues(labelDecode, tenancy.DefaultTenant) m.cachedPostingsCompressionTimeSeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_compression_time_seconds_total", Help: "Time spent compressing postings before storing them into postings cache.", - }, []string{"op"}) - m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode) - m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode) + }, []string{"op", tenancy.MetricLabel}) + m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode,
tenancy.DefaultTenant) + m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode, tenancy.DefaultTenant) - m.cachedPostingsOriginalSizeBytes = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + m.cachedPostingsOriginalSizeBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_original_size_bytes_total", Help: "Original size of postings stored into cache.", - }) - m.cachedPostingsCompressedSizeBytes = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + }, []string{tenancy.MetricLabel}) + m.cachedPostingsCompressedSizeBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_compressed_size_bytes_total", Help: "Compressed size of postings stored into cache.", - }) + }, []string{tenancy.MetricLabel}) - m.seriesFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.seriesFetchDuration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_fetch_duration_seconds", Help: "The time it takes to fetch series to respond to a request sent to a store gateway. It includes both the time to fetch it from the cache and from storage in case of cache misses.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) + }, []string{tenancy.MetricLabel}) - m.seriesFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.seriesFetchDurationSum = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_series_fetch_duration_sum_seconds", Help: "The total time it takes to fetch series to respond to a request sent to a store gateway across all series batches. It includes both the time to fetch it from the cache and from storage in case of cache misses.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) + }, []string{tenancy.MetricLabel}) - m.postingsFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.postingsFetchDuration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_postings_fetch_duration_seconds", Help: "The time it takes to fetch postings to respond to a request sent to a store gateway. 
It includes both the time to fetch it from the cache and from storage in case of cache misses.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) + }, []string{tenancy.MetricLabel}) - m.chunkFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.chunkFetchDuration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_chunks_fetch_duration_seconds", Help: "The total time spent fetching chunks within a single request for one block.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) + }, []string{tenancy.MetricLabel}) - m.chunkFetchDurationSum = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + m.chunkFetchDurationSum = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_bucket_store_chunks_fetch_duration_sum_seconds", Help: "The total absolute time spent fetching chunks within a single request for one block.", Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }) + }, []string{tenancy.MetricLabel}) - m.emptyPostingCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + m.emptyPostingCount = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_empty_postings_total", Help: "Total number of empty postings when fetching block series.", - }) + }, []string{tenancy.MetricLabel}) m.lazyExpandedPostingsCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_lazy_expanded_postings_total", @@ -423,18 +423,18 @@ func (s *BucketStore) validate() error { type noopCache struct{} -func (noopCache) StorePostings(ulid.ULID, labels.Label, []byte) {} -func (noopCache) FetchMultiPostings(_ context.Context, _ ulid.ULID, keys []labels.Label) (map[labels.Label][]byte, []labels.Label) { +func (noopCache) StorePostings(ulid.ULID, labels.Label, []byte, string) {} +func (noopCache) FetchMultiPostings(_ context.Context, _ ulid.ULID, keys []labels.Label, tenant string) (map[labels.Label][]byte, []labels.Label) { return map[labels.Label][]byte{}, keys } -func (noopCache) StoreExpandedPostings(_ ulid.ULID, _ []*labels.Matcher, _ []byte) {} -func (noopCache) FetchExpandedPostings(_ context.Context, _ ulid.ULID, _ []*labels.Matcher) ([]byte, bool) { +func (noopCache) StoreExpandedPostings(_ ulid.ULID, _ []*labels.Matcher, _ []byte, tenant string) {} +func (noopCache) FetchExpandedPostings(_ context.Context, _ ulid.ULID, _ []*labels.Matcher, tenant string) ([]byte, bool) { return []byte{}, false } -func (noopCache) StoreSeries(ulid.ULID, storage.SeriesRef, []byte) {} -func (noopCache) FetchMultiSeries(_ context.Context, _ ulid.ULID, ids []storage.SeriesRef) (map[storage.SeriesRef][]byte, []storage.SeriesRef) { +func (noopCache) StoreSeries(ulid.ULID, storage.SeriesRef, []byte, string) {} +func (noopCache) FetchMultiSeries(_ context.Context, _ ulid.ULID, ids []storage.SeriesRef, tenant string) (map[storage.SeriesRef][]byte, []storage.SeriesRef) { return map[storage.SeriesRef][]byte{}, ids } @@ -955,9 +955,10 @@ type blockSeriesClient struct { shardMatcher *storepb.ShardMatcher blockMatchers []*labels.Matcher calculateChunkHash bool - seriesFetchDurationSum prometheus.Histogram - chunkFetchDuration prometheus.Histogram - chunkFetchDurationSum prometheus.Histogram + seriesFetchDurationSum *prometheus.HistogramVec + chunkFetchDuration *prometheus.HistogramVec + chunkFetchDurationSum *prometheus.HistogramVec + tenant string // Internal state. 
i uint64 @@ -982,14 +983,15 @@ func newBlockSeriesClient( shardMatcher *storepb.ShardMatcher, calculateChunkHash bool, batchSize int, - seriesFetchDurationSum prometheus.Histogram, - chunkFetchDuration prometheus.Histogram, - chunkFetchDurationSum prometheus.Histogram, + seriesFetchDurationSum *prometheus.HistogramVec, + chunkFetchDuration *prometheus.HistogramVec, + chunkFetchDurationSum *prometheus.HistogramVec, extLsetToRemove map[string]struct{}, lazyExpandedPostingEnabled bool, lazyExpandedPostingsCount prometheus.Counter, lazyExpandedPostingSizeBytes prometheus.Counter, lazyExpandedPostingSeriesOverfetchedSizeBytes prometheus.Counter, + tenant string, ) *blockSeriesClient { var chunkr *bucketChunkReader if !req.SkipChunks { @@ -1029,6 +1031,7 @@ func newBlockSeriesClient( calculateChunkHash: calculateChunkHash, hasMorePostings: true, batchSize: batchSize, + tenant: tenant, } } @@ -1068,7 +1071,7 @@ func (b *blockSeriesClient) ExpandPostings( matchers sortedMatchers, seriesLimiter SeriesLimiter, ) error { - ps, err := b.indexr.ExpandedPostings(b.ctx, matchers, b.bytesLimiter, b.lazyExpandedPostingEnabled, b.lazyExpandedPostingSizeBytes) + ps, err := b.indexr.ExpandedPostings(b.ctx, matchers, b.bytesLimiter, b.lazyExpandedPostingEnabled, b.lazyExpandedPostingSizeBytes, b.tenant) if err != nil { return errors.Wrap(err, "expanded matching posting") } @@ -1099,16 +1102,16 @@ func (b *blockSeriesClient) ExpandPostings( func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { for len(b.entries) == 0 && b.hasMorePostings { - if err := b.nextBatch(); err != nil { + if err := b.nextBatch(b.tenant); err != nil { return nil, err } } if len(b.entries) == 0 { - b.seriesFetchDurationSum.Observe(b.indexr.stats.SeriesDownloadLatencySum.Seconds()) + b.seriesFetchDurationSum.WithLabelValues(b.tenant).Observe(b.indexr.stats.SeriesDownloadLatencySum.Seconds()) if b.chunkr != nil { - b.chunkFetchDuration.Observe(b.chunkr.stats.ChunksFetchDurationSum.Seconds()) - b.chunkFetchDurationSum.Observe(b.chunkr.stats.ChunksDownloadLatencySum.Seconds()) + b.chunkFetchDuration.WithLabelValues(b.tenant).Observe(b.chunkr.stats.ChunksFetchDurationSum.Seconds()) + b.chunkFetchDurationSum.WithLabelValues(b.tenant).Observe(b.chunkr.stats.ChunksDownloadLatencySum.Seconds()) } return nil, io.EOF } @@ -1122,7 +1125,7 @@ func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { }), nil } -func (b *blockSeriesClient) nextBatch() error { +func (b *blockSeriesClient) nextBatch(tenant string) error { start := b.i end := start + uint64(b.batchSize) if end > uint64(len(b.lazyPostings.postings)) { @@ -1143,7 +1146,7 @@ func (b *blockSeriesClient) nextBatch() error { b.expandedPostings[i] = b.expandedPostings[i] / 16 } } - b.indexr.storeExpandedPostingsToCache(b.blockMatchers, index.NewListPostings(b.expandedPostings), len(b.expandedPostings)) + b.indexr.storeExpandedPostingsToCache(b.blockMatchers, index.NewListPostings(b.expandedPostings), len(b.expandedPostings), tenant) } return nil } @@ -1153,7 +1156,7 @@ func (b *blockSeriesClient) nextBatch() error { b.chunkr.reset() } - if err := b.indexr.PreloadSeries(b.ctx, postingsBatch, b.bytesLimiter); err != nil { + if err := b.indexr.PreloadSeries(b.ctx, postingsBatch, b.bytesLimiter, b.tenant); err != nil { return errors.Wrap(err, "preload series") } @@ -1227,7 +1230,7 @@ OUTER: } if !b.skipChunks { - if err := b.chunkr.load(b.ctx, b.entries, b.loadAggregates, b.calculateChunkHash, b.bytesLimiter); err != nil { + if err := b.chunkr.load(b.ctx, b.entries, 
b.loadAggregates, b.calculateChunkHash, b.bytesLimiter, b.tenant); err != nil { return errors.Wrap(err, "load chunks") } } @@ -1376,7 +1379,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store } tenant, _ := tenancy.GetTenantFromGRPCMetadata(srv.Context()) - level.Debug(s.logger).Log("msg", "Tenant for Series request", "tenant", tenant) matchers, err := storepb.MatchersToPromMatchers(req.Matchers...) if err != nil { @@ -1386,7 +1388,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store req.MaxTime = s.limitMaxTime(req.MaxTime) var ( - bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes")) + bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes", tenant)) ctx = srv.Context() stats = &queryStats{} respSets []respSet @@ -1394,8 +1396,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store g, gctx = errgroup.WithContext(ctx) resHints = &hintspb.SeriesResponseHints{} reqBlockMatchers []*labels.Matcher - chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) - seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) + chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks", tenant)) + seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series", tenant)) queryStatsEnabled = false ) @@ -1467,6 +1469,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store s.metrics.lazyExpandedPostingsCount, s.metrics.lazyExpandedPostingSizeBytes, s.metrics.lazyExpandedPostingSeriesOverfetchedSizeBytes, + tenant, ) defer blockClient.Close() @@ -1505,7 +1508,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store blockClient, shardMatcher, false, - s.metrics.emptyPostingCount, + s.metrics.emptyPostingCount.WithLabelValues(tenant), nil, ) @@ -1521,28 +1524,28 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store s.mtx.RUnlock() defer func() { - s.metrics.seriesDataTouched.WithLabelValues("postings").Observe(float64(stats.postingsTouched)) - s.metrics.seriesDataFetched.WithLabelValues("postings").Observe(float64(stats.postingsFetched)) - s.metrics.seriesDataSizeTouched.WithLabelValues("postings").Observe(float64(stats.PostingsTouchedSizeSum)) - s.metrics.seriesDataSizeFetched.WithLabelValues("postings").Observe(float64(stats.PostingsFetchedSizeSum)) - s.metrics.seriesDataTouched.WithLabelValues("series").Observe(float64(stats.seriesTouched)) - s.metrics.seriesDataFetched.WithLabelValues("series").Observe(float64(stats.seriesFetched)) - s.metrics.seriesDataSizeTouched.WithLabelValues("series").Observe(float64(stats.SeriesTouchedSizeSum)) - s.metrics.seriesDataSizeFetched.WithLabelValues("series").Observe(float64(stats.SeriesFetchedSizeSum)) - s.metrics.seriesDataTouched.WithLabelValues("chunks").Observe(float64(stats.chunksTouched)) - s.metrics.seriesDataFetched.WithLabelValues("chunks").Observe(float64(stats.chunksFetched)) - s.metrics.seriesDataSizeTouched.WithLabelValues("chunks").Observe(float64(stats.ChunksTouchedSizeSum)) - s.metrics.seriesDataSizeFetched.WithLabelValues("chunks").Observe(float64(stats.ChunksFetchedSizeSum)) - s.metrics.resultSeriesCount.Observe(float64(stats.mergedSeriesCount)) - s.metrics.cachedPostingsCompressions.WithLabelValues(labelEncode).Add(float64(stats.cachedPostingsCompressions)) - 
s.metrics.cachedPostingsCompressions.WithLabelValues(labelDecode).Add(float64(stats.cachedPostingsDecompressions)) - s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelEncode).Add(float64(stats.cachedPostingsCompressionErrors)) - s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelDecode).Add(float64(stats.cachedPostingsDecompressionErrors)) - s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode).Add(stats.CachedPostingsCompressionTimeSum.Seconds()) - s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode).Add(stats.CachedPostingsDecompressionTimeSum.Seconds()) - s.metrics.cachedPostingsOriginalSizeBytes.Add(float64(stats.CachedPostingsOriginalSizeSum)) - s.metrics.cachedPostingsCompressedSizeBytes.Add(float64(stats.CachedPostingsCompressedSizeSum)) - s.metrics.postingsSizeBytes.Observe(float64(int(stats.PostingsFetchedSizeSum) + int(stats.PostingsTouchedSizeSum))) + s.metrics.seriesDataTouched.WithLabelValues("postings", tenant).Observe(float64(stats.postingsTouched)) + s.metrics.seriesDataFetched.WithLabelValues("postings", tenant).Observe(float64(stats.postingsFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("postings", tenant).Observe(float64(stats.PostingsTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("postings", tenant).Observe(float64(stats.PostingsFetchedSizeSum)) + s.metrics.seriesDataTouched.WithLabelValues("series", tenant).Observe(float64(stats.seriesTouched)) + s.metrics.seriesDataFetched.WithLabelValues("series", tenant).Observe(float64(stats.seriesFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("series", tenant).Observe(float64(stats.SeriesTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("series", tenant).Observe(float64(stats.SeriesFetchedSizeSum)) + s.metrics.seriesDataTouched.WithLabelValues("chunks", tenant).Observe(float64(stats.chunksTouched)) + s.metrics.seriesDataFetched.WithLabelValues("chunks", tenant).Observe(float64(stats.chunksFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("chunks", tenant).Observe(float64(stats.ChunksTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("chunks", tenant).Observe(float64(stats.ChunksFetchedSizeSum)) + s.metrics.resultSeriesCount.WithLabelValues(tenant).Observe(float64(stats.mergedSeriesCount)) + s.metrics.cachedPostingsCompressions.WithLabelValues(labelEncode, tenant).Add(float64(stats.cachedPostingsCompressions)) + s.metrics.cachedPostingsCompressions.WithLabelValues(labelDecode, tenant).Add(float64(stats.cachedPostingsDecompressions)) + s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelEncode, tenant).Add(float64(stats.cachedPostingsCompressionErrors)) + s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelDecode, tenant).Add(float64(stats.cachedPostingsDecompressionErrors)) + s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode, tenant).Add(stats.CachedPostingsCompressionTimeSum.Seconds()) + s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode, tenant).Add(stats.CachedPostingsDecompressionTimeSum.Seconds()) + s.metrics.cachedPostingsOriginalSizeBytes.WithLabelValues(tenant).Add(float64(stats.CachedPostingsOriginalSizeSum)) + s.metrics.cachedPostingsCompressedSizeBytes.WithLabelValues(tenant).Add(float64(stats.CachedPostingsCompressedSizeSum)) + s.metrics.postingsSizeBytes.WithLabelValues(tenant).Observe(float64(int(stats.PostingsFetchedSizeSum) + int(stats.PostingsTouchedSizeSum))) 
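+			// Every per-request observation above is labelled with the request's tenant.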
level.Debug(s.logger).Log("msg", "stats query processed", "request", req, @@ -1564,8 +1567,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store } stats.blocksQueried = len(respSets) stats.GetAllDuration = time.Since(begin) - s.metrics.seriesGetAllDuration.Observe(stats.GetAllDuration.Seconds()) - s.metrics.seriesBlocksQueried.Observe(float64(stats.blocksQueried)) + s.metrics.seriesGetAllDuration.WithLabelValues(tenant).Observe(stats.GetAllDuration.Seconds()) + s.metrics.seriesBlocksQueried.WithLabelValues(tenant).Observe(float64(stats.blocksQueried)) } // Merge the sub-results from each selected block. @@ -1593,7 +1596,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store stats.mergedSeriesCount++ if !req.SkipChunks { stats.mergedChunksCount += len(series.Chunks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + s.metrics.chunkSizeBytes.WithLabelValues(tenant).Observe(float64(chunksSize(series.Chunks))) } } if err = srv.Send(at); err != nil { @@ -1602,7 +1605,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store } } stats.MergeDuration = time.Since(begin) - s.metrics.seriesMergeDuration.Observe(stats.MergeDuration.Seconds()) + s.metrics.seriesMergeDuration.WithLabelValues(tenant).Observe(stats.MergeDuration.Seconds()) err = nil }) @@ -1648,7 +1651,6 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq } tenant, _ := tenancy.GetTenantFromGRPCMetadata(ctx) - level.Debug(s.logger).Log("msg", "Tenant for LabelNames request", "tenant", tenant) resHints := &hintspb.LabelNamesResponseHints{} @@ -1672,8 +1674,8 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq var mtx sync.Mutex var sets [][]string - var seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) - var bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes")) + var seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series", tenant)) + var bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes", tenant)) for _, b := range s.blocks { b := b @@ -1750,6 +1752,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq s.metrics.lazyExpandedPostingsCount, s.metrics.lazyExpandedPostingSizeBytes, s.metrics.lazyExpandedPostingSeriesOverfetchedSizeBytes, + tenant, ) defer blockClient.Close() @@ -1848,7 +1851,6 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR } tenant, _ := tenancy.GetTenantFromGRPCMetadata(ctx) - level.Debug(s.logger).Log("msg", "Tenant for LabelValues request", "tenant", tenant) resHints := &hintspb.LabelValuesResponseHints{} @@ -1872,8 +1874,8 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR var mtx sync.Mutex var sets [][]string - var seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) - var bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes")) + var seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series", tenant)) + var bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes", tenant)) for _, b := range s.blocks { b := b @@ -1953,6 +1955,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR s.metrics.lazyExpandedPostingsCount, 
s.metrics.lazyExpandedPostingSizeBytes, s.metrics.lazyExpandedPostingSeriesOverfetchedSizeBytes, + tenant, ) defer blockClient.Close() @@ -2394,14 +2397,14 @@ func (r *bucketIndexReader) reset() { // Reminder: A posting is a reference (represented as a uint64) to a series reference, which in turn points to the first // chunk where the series contains the matching label-value pair for a given block of data. Postings can be fetched by // single label name=value. -func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatchers, bytesLimiter BytesLimiter, lazyExpandedPostingEnabled bool, lazyExpandedPostingSizeBytes prometheus.Counter) (*lazyExpandedPostings, error) { +func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatchers, bytesLimiter BytesLimiter, lazyExpandedPostingEnabled bool, lazyExpandedPostingSizeBytes prometheus.Counter, tenant string) (*lazyExpandedPostings, error) { // Shortcut the case of `len(postingGroups) == 0`. It will only happen when no // matchers specified, and we don't need to fetch expanded postings from cache. if len(ms) == 0 { return nil, nil } - hit, postings, err := r.fetchExpandedPostingsFromCache(ctx, ms, bytesLimiter) + hit, postings, err := r.fetchExpandedPostingsFromCache(ctx, ms, bytesLimiter, tenant) if err != nil { return nil, err } @@ -2418,7 +2421,7 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch return nil, errors.Wrap(err, "matchersToPostingGroups") } if postingGroups == nil { - r.storeExpandedPostingsToCache(ms, index.EmptyPostings(), 0) + r.storeExpandedPostingsToCache(ms, index.EmptyPostings(), 0, tenant) return nil, nil } i := 0 @@ -2446,13 +2449,13 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch postingGroups = append(postingGroups, newPostingGroup(true, name, []string{value}, nil)) } - ps, err := fetchLazyExpandedPostings(ctx, postingGroups, r, bytesLimiter, addAllPostings, lazyExpandedPostingEnabled, lazyExpandedPostingSizeBytes) + ps, err := fetchLazyExpandedPostings(ctx, postingGroups, r, bytesLimiter, addAllPostings, lazyExpandedPostingEnabled, lazyExpandedPostingSizeBytes, tenant) if err != nil { return nil, errors.Wrap(err, "fetch and expand postings") } // If postings still have matchers to be applied lazily, cache expanded postings after filtering series so skip here. 
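	// Lazily-expanded postings are instead cached from nextBatch, once the
	// final series set is known.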
if !ps.lazyExpanded() { - r.storeExpandedPostingsToCache(ms, index.NewListPostings(ps.postings), len(ps.postings)) + r.storeExpandedPostingsToCache(ms, index.NewListPostings(ps.postings), len(ps.postings), tenant) } if len(ps.postings) > 0 { @@ -2743,8 +2746,8 @@ type postingPtr struct { ptr index.Range } -func (r *bucketIndexReader) fetchExpandedPostingsFromCache(ctx context.Context, ms []*labels.Matcher, bytesLimiter BytesLimiter) (bool, []storage.SeriesRef, error) { - dataFromCache, hit := r.block.indexCache.FetchExpandedPostings(ctx, r.block.meta.ULID, ms) +func (r *bucketIndexReader) fetchExpandedPostingsFromCache(ctx context.Context, ms []*labels.Matcher, bytesLimiter BytesLimiter, tenant string) (bool, []storage.SeriesRef, error) { + dataFromCache, hit := r.block.indexCache.FetchExpandedPostings(ctx, r.block.meta.ULID, ms, tenant) if !hit { return false, nil, nil } @@ -2788,7 +2791,7 @@ func (r *bucketIndexReader) fetchExpandedPostingsFromCache(ctx context.Context, return true, ps, nil } -func (r *bucketIndexReader) storeExpandedPostingsToCache(ms []*labels.Matcher, ps index.Postings, length int) { +func (r *bucketIndexReader) storeExpandedPostingsToCache(ms []*labels.Matcher, ps index.Postings, length int, tenant string) { // Encode postings to cache. We compress and cache postings before adding // 16 bytes padding in order to make compressed size smaller. dataToCache, compressionDuration, compressionErrors, compressedSize := r.encodePostingsToCache(ps, length) @@ -2797,7 +2800,7 @@ func (r *bucketIndexReader) storeExpandedPostingsToCache(ms []*labels.Matcher, p r.stats.CachedPostingsCompressionTimeSum += compressionDuration r.stats.CachedPostingsCompressedSizeSum += units.Base2Bytes(compressedSize) r.stats.CachedPostingsOriginalSizeSum += units.Base2Bytes(length * 4) // Estimate the posting list size. - r.block.indexCache.StoreExpandedPostings(r.block.meta.ULID, ms, dataToCache) + r.block.indexCache.StoreExpandedPostings(r.block.meta.ULID, ms, dataToCache, tenant) } var bufioReaderPool = sync.Pool{ @@ -2809,10 +2812,10 @@ var bufioReaderPool = sync.Pool{ // fetchPostings fill postings requested by posting groups. // It returns one posting for each key, in the same order. // If postings for given key is not fetched, entry at given index will be nil. -func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Label, bytesLimiter BytesLimiter) ([]index.Postings, []func(), error) { +func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Label, bytesLimiter BytesLimiter, tenant string) ([]index.Postings, []func(), error) { var closeFns []func() - timer := prometheus.NewTimer(r.block.metrics.postingsFetchDuration) + timer := prometheus.NewTimer(r.block.metrics.postingsFetchDuration.WithLabelValues(tenant)) defer timer.ObserveDuration() var ptrs []postingPtr @@ -2820,7 +2823,7 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab output := make([]index.Postings, len(keys)) // Fetch postings from the cache with a single call. 
- fromCache, _ := r.block.indexCache.FetchMultiPostings(ctx, r.block.meta.ULID, keys) + fromCache, _ := r.block.indexCache.FetchMultiPostings(ctx, r.block.meta.ULID, keys, tenant) for _, dataFromCache := range fromCache { if err := bytesLimiter.Reserve(uint64(len(dataFromCache))); err != nil { return nil, closeFns, httpgrpc.Errorf(int(codes.ResourceExhausted), "exceeded bytes limit while loading postings from index cache: %s", err) @@ -2938,7 +2941,7 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab r.stats.CachedPostingsCompressionTimeSum += time.Since(startCompression) r.mtx.Unlock() - r.block.indexCache.StorePostings(r.block.meta.ULID, keys[keyID], dataToCache) + r.block.indexCache.StorePostings(r.block.meta.ULID, keys[keyID], dataToCache, tenant) } r.mtx.Lock() @@ -3049,8 +3052,8 @@ func (it *bigEndianPostings) length() int { return len(it.list) / 4 } -func (r *bucketIndexReader) PreloadSeries(ctx context.Context, ids []storage.SeriesRef, bytesLimiter BytesLimiter) error { - timer := prometheus.NewTimer(r.block.metrics.seriesFetchDuration) +func (r *bucketIndexReader) PreloadSeries(ctx context.Context, ids []storage.SeriesRef, bytesLimiter BytesLimiter, tenant string) error { + timer := prometheus.NewTimer(r.block.metrics.seriesFetchDuration.WithLabelValues(tenant)) defer func() { d := timer.ObserveDuration() r.stats.SeriesDownloadLatencySum += d @@ -3058,7 +3061,7 @@ func (r *bucketIndexReader) PreloadSeries(ctx context.Context, ids []storage.Ser // Load series from cache, overwriting the list of ids to preload // with the missing ones. - fromCache, ids := r.block.indexCache.FetchMultiSeries(ctx, r.block.meta.ULID, ids) + fromCache, ids := r.block.indexCache.FetchMultiSeries(ctx, r.block.meta.ULID, ids, tenant) for id, b := range fromCache { r.loadedSeries[id] = b if err := bytesLimiter.Reserve(uint64(len(b))); err != nil { @@ -3077,13 +3080,13 @@ func (r *bucketIndexReader) PreloadSeries(ctx context.Context, ids []storage.Ser i, j := p.ElemRng[0], p.ElemRng[1] g.Go(func() error { - return r.loadSeries(ctx, ids[i:j], false, s, e, bytesLimiter) + return r.loadSeries(ctx, ids[i:j], false, s, e, bytesLimiter, tenant) }) } return g.Wait() } -func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []storage.SeriesRef, refetch bool, start, end uint64, bytesLimiter BytesLimiter) error { +func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []storage.SeriesRef, refetch bool, start, end uint64, bytesLimiter BytesLimiter, tenant string) error { begin := time.Now() if bytesLimiter != nil { @@ -3120,16 +3123,16 @@ func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []storage.Series } // Inefficient, but should be rare. - r.block.metrics.seriesRefetches.Inc() + r.block.metrics.seriesRefetches.WithLabelValues(tenant).Inc() level.Warn(r.block.logger).Log("msg", "series size exceeded expected size; refetching", "id", id, "series length", n+int(l), "maxSeriesSize", r.block.estimatedMaxSeriesSize) // Fetch plus to get the size of next one if exists. 
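	// The retried load keeps the same tenant so the refetch is attributed to
	// that tenant's metrics.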
- return r.loadSeries(ctx, ids[i:], true, uint64(id), uint64(id)+uint64(n+int(l)+1), bytesLimiter) + return r.loadSeries(ctx, ids[i:], true, uint64(id), uint64(id)+uint64(n+int(l)+1), bytesLimiter, tenant) } c = c[n : n+int(l)] r.mtx.Lock() r.loadedSeries[id] = c - r.block.indexCache.StoreSeries(r.block.meta.ULID, id, c) + r.block.indexCache.StoreSeries(r.block.meta.ULID, id, c, tenant) r.mtx.Unlock() } return nil @@ -3367,7 +3370,7 @@ func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunk int) } // load loads all added chunks and saves resulting aggrs to refs. -func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { +func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, calculateChunkChecksum bool, bytesLimiter BytesLimiter, tenant string) error { r.loadingChunksMtx.Lock() r.loadingChunks = true r.loadingChunksMtx.Unlock() @@ -3405,7 +3408,7 @@ func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs [ p := p indices := pIdxs[p.ElemRng[0]:p.ElemRng[1]] g.Go(func() error { - return r.loadChunks(ctx, res, aggrs, seq, p, indices, calculateChunkChecksum, bytesLimiter) + return r.loadChunks(ctx, res, aggrs, seq, p, indices, calculateChunkChecksum, bytesLimiter, tenant) }) } } @@ -3414,7 +3417,7 @@ func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs [ // loadChunks will read range [start, end] from the segment file with sequence number seq. // This data range covers chunks starting at supplied offsets. -func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, seq int, part Part, pIdxs []loadIdx, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { +func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, seq int, part Part, pIdxs []loadIdx, calculateChunkChecksum bool, bytesLimiter BytesLimiter, tenant string) error { var locked bool fetchBegin := time.Now() defer func() { @@ -3503,7 +3506,7 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a continue } - r.block.metrics.chunkRefetches.Inc() + r.block.metrics.chunkRefetches.WithLabelValues(tenant).Inc() // If we didn't fetch enough data for the chunk, fetch more. fetchBegin = time.Now() // Read entire chunk into new buffer. diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go index 360cdd67e5..0811d89cc0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go @@ -15,6 +15,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "golang.org/x/crypto/blake2b" + + "github.com/thanos-io/thanos/pkg/tenancy" ) const ( @@ -36,24 +38,24 @@ var ( // (potentially with a deadline) as in the original user's request. type IndexCache interface { // StorePostings stores postings for a single series. - StorePostings(blockID ulid.ULID, l labels.Label, v []byte) + StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) // FetchMultiPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. 
- FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) + FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) // StoreExpandedPostings stores expanded postings for a set of label matchers. - StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte) + StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) // FetchExpandedPostings fetches expanded postings and returns cached data and a boolean value representing whether it is a cache hit or not. - FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher) ([]byte, bool) + FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) // StoreSeries stores a single series. - StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte) + StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) // FetchMultiSeries fetches multiple series - each identified by ID - from the cache // and returns a map containing cache hits, along with a list of missing IDs. - FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) + FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) } // Common metrics that should be used by all cache implementations. @@ -69,23 +71,23 @@ func newCommonMetrics(reg prometheus.Registerer) *commonMetrics { requestTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_requests_total", Help: "Total number of items requests to the cache.", - }, []string{"item_type"}), + }, []string{"item_type", tenancy.MetricLabel}), hitsTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_hits_total", Help: "Total number of items requests to the cache that were a hit.", - }, []string{"item_type"}), + }, []string{"item_type", tenancy.MetricLabel}), dataSizeBytes: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_store_index_cache_stored_data_size_bytes", Help: "Histogram to track item data size stored in index cache", Buckets: []float64{ 32, 256, 512, 1024, 32 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024, 32 * 1024 * 1024, 64 * 1024 * 1024, 128 * 1024 * 1024, 256 * 1024 * 1024, 512 * 1024 * 1024, }, - }, []string{"item_type"}), + }, []string{"item_type", tenancy.MetricLabel}), fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_store_index_cache_fetch_duration_seconds", Help: "Histogram to track latency to fetch items from index cache", Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 30, 45, 60, 90, 120}, - }, []string{"item_type"}), + }, []string{"item_type", tenancy.MetricLabel}), } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go index d4e9f0c5cd..4204c92f22 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/factory.go @@ -28,6 +28,9 @@ const ( type IndexCacheConfig struct { Type IndexCacheProvider `yaml:"type"` Config interface{} `yaml:"config"` + + // Available item types are Postings, 
Series and ExpandedPostings. + EnabledItems []string `yaml:"enabled_items"` } // NewIndexCache initializes and returns new index cache. @@ -66,5 +69,13 @@ func NewIndexCache(logger log.Logger, confContentYaml []byte, reg prometheus.Reg if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("create %s index cache", cacheConfig.Type)) } + + if len(cacheConfig.EnabledItems) > 0 { + if err = ValidateEnabledItems(cacheConfig.EnabledItems); err != nil { + return nil, err + } + cache = NewFilteredIndexCache(cache, cacheConfig.EnabledItems) + } + return cache, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/filter_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/filter_cache.go new file mode 100644 index 0000000000..193f7363a2 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/filter_cache.go @@ -0,0 +1,88 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package storecache + +import ( + "context" + "fmt" + + "github.com/oklog/ulid" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "golang.org/x/exp/slices" +) + +type FilteredIndexCache struct { + cache IndexCache + enabledItems []string +} + +// NewFilteredIndexCache creates a filtered index cache based on enabled items. +func NewFilteredIndexCache(cache IndexCache, enabledItems []string) *FilteredIndexCache { + return &FilteredIndexCache{ + cache: cache, + enabledItems: enabledItems, + } +} + +// StorePostings sets the postings identified by the ulid and label to the value v, +// if the postings already exists in the cache it is not mutated. +func (c *FilteredIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypePostings) { + c.cache.StorePostings(blockID, l, v, tenant) + } +} + +// FetchMultiPostings fetches multiple postings - each identified by a label - +// and returns a map containing cache hits, along with a list of missing keys. +func (c *FilteredIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypePostings) { + return c.cache.FetchMultiPostings(ctx, blockID, keys, tenant) + } + return nil, keys +} + +// StoreExpandedPostings stores expanded postings for a set of label matchers. +func (c *FilteredIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypeExpandedPostings) { + c.cache.StoreExpandedPostings(blockID, matchers, v, tenant) + } +} + +// FetchExpandedPostings fetches expanded postings and returns cached data and a boolean value representing whether it is a cache hit or not. +func (c *FilteredIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypeExpandedPostings) { + return c.cache.FetchExpandedPostings(ctx, blockID, matchers, tenant) + } + return nil, false +} + +// StoreSeries sets the series identified by the ulid and id to the value v, +// if the series already exists in the cache it is not mutated. 
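+// The write is forwarded only when the Series item type is enabled; an empty
+// enabledItems list enables every item type. A cache built, for instance, as
+// NewFilteredIndexCache(c, []string{cacheTypeSeries}) drops postings writes
+// and reports misses for postings reads.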
+func (c *FilteredIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypeSeries) { + c.cache.StoreSeries(blockID, id, v, tenant) + } +} + +// FetchMultiSeries fetches multiple series - each identified by ID - from the cache +// and returns a map containing cache hits, along with a list of missing IDs. +func (c *FilteredIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { + if len(c.enabledItems) == 0 || slices.Contains(c.enabledItems, cacheTypeSeries) { + return c.cache.FetchMultiSeries(ctx, blockID, ids, tenant) + } + return nil, ids +} + +func ValidateEnabledItems(enabledItems []string) error { + for _, item := range enabledItems { + switch item { + // valid + case cacheTypePostings, cacheTypeExpandedPostings, cacheTypeSeries: + default: + return fmt.Errorf("unsupported item type %s", item) + } + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index e0077acc35..3312c5faa0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -21,6 +21,7 @@ import ( "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/model" + "github.com/thanos-io/thanos/pkg/tenancy" ) var ( @@ -115,9 +116,9 @@ func NewInMemoryIndexCacheWithConfig(logger log.Logger, commonMetrics *commonMet c.added.WithLabelValues(cacheTypeSeries) c.added.WithLabelValues(cacheTypeExpandedPostings) - c.commonMetrics.requestTotal.WithLabelValues(cacheTypePostings) - c.commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries) - c.commonMetrics.requestTotal.WithLabelValues(cacheTypeExpandedPostings) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant) c.overflow = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_items_overflowed_total", @@ -127,9 +128,9 @@ func NewInMemoryIndexCacheWithConfig(logger log.Logger, commonMetrics *commonMet c.overflow.WithLabelValues(cacheTypeSeries) c.overflow.WithLabelValues(cacheTypeExpandedPostings) - c.commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings) - c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries) - c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeExpandedPostings) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant) c.current = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "thanos_store_index_cache_items", @@ -197,8 +198,8 @@ func (c *InMemoryIndexCache) onEvict(key, val interface{}) { c.curSize -= entrySize } -func (c *InMemoryIndexCache) get(typ string, key cacheKey) ([]byte, bool) { - c.commonMetrics.requestTotal.WithLabelValues(typ).Inc() +func (c *InMemoryIndexCache) get(typ string, key cacheKey, tenant string) ([]byte, bool) { + c.commonMetrics.requestTotal.WithLabelValues(typ, tenant).Inc() c.mtx.Lock() defer c.mtx.Unlock() @@ -207,7 +208,7 @@ func (c 
*InMemoryIndexCache) get(typ string, key cacheKey) ([]byte, bool) { if !ok { return nil, false } - c.commonMetrics.hitsTotal.WithLabelValues(typ).Inc() + c.commonMetrics.hitsTotal.WithLabelValues(typ, tenant).Inc() return v.([]byte), true } @@ -294,22 +295,22 @@ func copyToKey(l labels.Label) cacheKeyPostings { // StorePostings sets the postings identified by the ulid and label to the value v, // if the postings already exists in the cache it is not mutated. -func (c *InMemoryIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte) { - c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypePostings).Observe(float64(len(v))) +func (c *InMemoryIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { + c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypePostings, tenant).Observe(float64(len(v))) c.set(cacheTypePostings, cacheKey{block: blockID.String(), key: copyToKey(l)}, v) } // FetchMultiPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. -func (c *InMemoryIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { - timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypePostings)) +func (c *InMemoryIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypePostings, tenant)) defer timer.ObserveDuration() hits = map[labels.Label][]byte{} blockIDKey := blockID.String() for _, key := range keys { - if b, ok := c.get(cacheTypePostings, cacheKey{blockIDKey, cacheKeyPostings(key), ""}); ok { + if b, ok := c.get(cacheTypePostings, cacheKey{blockIDKey, cacheKeyPostings(key), ""}, tenant); ok { hits[key] = b continue } @@ -321,17 +322,17 @@ func (c *InMemoryIndexCache) FetchMultiPostings(_ context.Context, blockID ulid. } // StoreExpandedPostings stores expanded postings for a set of label matchers. -func (c *InMemoryIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte) { - c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeExpandedPostings).Observe(float64(len(v))) +func (c *InMemoryIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { + c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeExpandedPostings, tenant).Observe(float64(len(v))) c.set(cacheTypeExpandedPostings, cacheKey{block: blockID.String(), key: cacheKeyExpandedPostings(labelMatchersToString(matchers))}, v) } // FetchExpandedPostings fetches expanded postings and returns cached data and a boolean value representing whether it is a cache hit or not. 
-func (c *InMemoryIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher) ([]byte, bool) { - timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeExpandedPostings)) +func (c *InMemoryIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeExpandedPostings, tenant)) defer timer.ObserveDuration() - if b, ok := c.get(cacheTypeExpandedPostings, cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(matchers)), ""}); ok { + if b, ok := c.get(cacheTypeExpandedPostings, cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(matchers)), ""}, tenant); ok { return b, true } return nil, false @@ -339,22 +340,22 @@ func (c *InMemoryIndexCache) FetchExpandedPostings(_ context.Context, blockID ul // StoreSeries sets the series identified by the ulid and id to the value v, // if the series already exists in the cache it is not mutated. -func (c *InMemoryIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte) { - c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeSeries).Observe(float64(len(v))) +func (c *InMemoryIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { + c.commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeSeries, tenant).Observe(float64(len(v))) c.set(cacheTypeSeries, cacheKey{blockID.String(), cacheKeySeries(id), ""}, v) } // FetchMultiSeries fetches multiple series - each identified by ID - from the cache // and returns a map containing cache hits, along with a list of missing IDs. -func (c *InMemoryIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { - timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeSeries)) +func (c *InMemoryIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { + timer := prometheus.NewTimer(c.commonMetrics.fetchLatency.WithLabelValues(cacheTypeSeries, tenant)) defer timer.ObserveDuration() hits = map[storage.SeriesRef][]byte{} blockIDKey := blockID.String() for _, id := range ids { - if b, ok := c.get(cacheTypeSeries, cacheKey{blockIDKey, cacheKeySeries(id), ""}); ok { + if b, ok := c.get(cacheTypeSeries, cacheKey{blockIDKey, cacheKeySeries(id), ""}, tenant); ok { hits[id] = b continue } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go index 104b936e8c..bc8bb5b52c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/cacheutil" + "github.com/thanos-io/thanos/pkg/tenancy" ) const ( @@ -33,18 +34,10 @@ type RemoteIndexCache struct { compressionScheme string // Metrics. 
- postingRequests prometheus.Counter
- seriesRequests prometheus.Counter
- expandedPostingRequests prometheus.Counter
- postingHits prometheus.Counter
- seriesHits prometheus.Counter
- expandedPostingHits prometheus.Counter
- postingDataSizeBytes prometheus.Observer
- expandedPostingDataSizeBytes prometheus.Observer
- seriesDataSizeBytes prometheus.Observer
- postingsFetchDuration prometheus.Observer
- expandedPostingsFetchDuration prometheus.Observer
- seriesFetchDuration prometheus.Observer
+ requestTotal *prometheus.CounterVec
+ hitsTotal *prometheus.CounterVec
+ dataSizeBytes *prometheus.HistogramVec
+ fetchLatency *prometheus.HistogramVec
}

// NewRemoteIndexCache makes a new RemoteIndexCache.
@@ -59,21 +52,23 @@ func NewRemoteIndexCache(logger log.Logger, cacheClient cacheutil.RemoteCacheCli
commonMetrics = newCommonMetrics(reg)
}

- c.postingRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypePostings)
- c.seriesRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries)
- c.expandedPostingRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypeExpandedPostings)
+ c.requestTotal = commonMetrics.requestTotal
+ c.hitsTotal = commonMetrics.hitsTotal
+ c.dataSizeBytes = commonMetrics.dataSizeBytes
+ c.fetchLatency = commonMetrics.fetchLatency

- c.postingHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings)
- c.seriesHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries)
- c.expandedPostingHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypeExpandedPostings)
+ // Init requestTotal and hitsTotal with the default tenant
+ c.requestTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant)
+ c.requestTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant)
+ c.requestTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant)

- c.postingDataSizeBytes = commonMetrics.dataSizeBytes.WithLabelValues(cacheTypePostings)
- c.seriesDataSizeBytes = commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeSeries)
- c.expandedPostingDataSizeBytes = commonMetrics.dataSizeBytes.WithLabelValues(cacheTypeExpandedPostings)
+ c.hitsTotal.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant)
+ c.hitsTotal.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant)
+ c.hitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant)

- c.postingsFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypePostings)
- c.seriesFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypeSeries)
- c.expandedPostingsFetchDuration = commonMetrics.fetchLatency.WithLabelValues(cacheTypeExpandedPostings)
+ c.fetchLatency.WithLabelValues(cacheTypePostings, tenancy.DefaultTenant)
+ c.fetchLatency.WithLabelValues(cacheTypeSeries, tenancy.DefaultTenant)
+ c.fetchLatency.WithLabelValues(cacheTypeExpandedPostings, tenancy.DefaultTenant)

level.Info(logger).Log("msg", "created index cache")

@@ -83,8 +78,8 @@ func NewRemoteIndexCache(logger log.Logger, cacheClient cacheutil.RemoteCacheCli
// StorePostings sets the postings identified by the ulid and label to the value v.
// The function enqueues the request and returns immediately: the entry will be
// asynchronously stored in the cache.
-func (c *RemoteIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte) { - c.postingDataSizeBytes.Observe(float64(len(v))) +func (c *RemoteIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { + c.dataSizeBytes.WithLabelValues(cacheTypePostings, tenant).Observe(float64(len(v))) key := cacheKey{blockID.String(), cacheKeyPostings(l), c.compressionScheme}.string() if err := c.memcached.SetAsync(key, v, memcachedDefaultTTL); err != nil { level.Error(c.logger).Log("msg", "failed to cache postings in memcached", "err", err) @@ -94,8 +89,8 @@ func (c *RemoteIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v [] // FetchMultiPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. // In case of error, it logs and return an empty cache hits map. -func (c *RemoteIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, lbls []labels.Label) (hits map[labels.Label][]byte, misses []labels.Label) { - timer := prometheus.NewTimer(c.postingsFetchDuration) +func (c *RemoteIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, lbls []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { + timer := prometheus.NewTimer(c.fetchLatency.WithLabelValues(cacheTypePostings, tenant)) defer timer.ObserveDuration() keys := make([]string, 0, len(lbls)) @@ -107,7 +102,8 @@ func (c *RemoteIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid. } // Fetch the keys from memcached in a single request. - c.postingRequests.Add(float64(len(keys))) + c.requestTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(len(keys))) + results := c.memcached.GetMulti(ctx, keys) if len(results) == 0 { return nil, lbls @@ -127,16 +123,15 @@ func (c *RemoteIndexCache) FetchMultiPostings(ctx context.Context, blockID ulid. hits[lbl] = value } - - c.postingHits.Add(float64(len(hits))) + c.hitsTotal.WithLabelValues(cacheTypePostings, tenant).Add(float64(len(hits))) return hits, misses } // StoreExpandedPostings sets the postings identified by the ulid and label to the value v. // The function enqueues the request and returns immediately: the entry will be // asynchronously stored in the cache. -func (c *RemoteIndexCache) StoreExpandedPostings(blockID ulid.ULID, keys []*labels.Matcher, v []byte) { - c.expandedPostingDataSizeBytes.Observe(float64(len(v))) +func (c *RemoteIndexCache) StoreExpandedPostings(blockID ulid.ULID, keys []*labels.Matcher, v []byte, tenant string) { + c.dataSizeBytes.WithLabelValues(cacheTypeExpandedPostings, tenant).Observe(float64(len(v))) key := cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(keys)), c.compressionScheme}.string() if err := c.memcached.SetAsync(key, v, memcachedDefaultTTL); err != nil { @@ -147,20 +142,20 @@ func (c *RemoteIndexCache) StoreExpandedPostings(blockID ulid.ULID, keys []*labe // FetchExpandedPostings fetches multiple postings - each identified by a label - // and returns a map containing cache hits, along with a list of missing keys. // In case of error, it logs and return an empty cache hits map. 
-func (c *RemoteIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, lbls []*labels.Matcher) ([]byte, bool) { - timer := prometheus.NewTimer(c.expandedPostingsFetchDuration) +func (c *RemoteIndexCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, lbls []*labels.Matcher, tenant string) ([]byte, bool) { + timer := prometheus.NewTimer(c.fetchLatency.WithLabelValues(cacheTypeExpandedPostings, tenant)) defer timer.ObserveDuration() key := cacheKey{blockID.String(), cacheKeyExpandedPostings(labelMatchersToString(lbls)), c.compressionScheme}.string() // Fetch the keys from memcached in a single request. - c.expandedPostingRequests.Add(1) + c.requestTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Add(1) results := c.memcached.GetMulti(ctx, []string{key}) if len(results) == 0 { return nil, false } if res, ok := results[key]; ok { - c.expandedPostingHits.Add(1) + c.hitsTotal.WithLabelValues(cacheTypeExpandedPostings, tenant).Add(1) return res, true } return nil, false @@ -169,8 +164,8 @@ func (c *RemoteIndexCache) FetchExpandedPostings(ctx context.Context, blockID ul // StoreSeries sets the series identified by the ulid and id to the value v. // The function enqueues the request and returns immediately: the entry will be // asynchronously stored in the cache. -func (c *RemoteIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte) { - c.seriesDataSizeBytes.Observe(float64(len(v))) +func (c *RemoteIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { + c.dataSizeBytes.WithLabelValues(cacheTypeSeries, tenant).Observe(float64(len(v))) key := cacheKey{blockID.String(), cacheKeySeries(id), ""}.string() if err := c.memcached.SetAsync(key, v, memcachedDefaultTTL); err != nil { @@ -181,8 +176,8 @@ func (c *RemoteIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, // FetchMultiSeries fetches multiple series - each identified by ID - from the cache // and returns a map containing cache hits, along with a list of missing IDs. // In case of error, it logs and return an empty cache hits map. -func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { - timer := prometheus.NewTimer(c.seriesFetchDuration) +func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { + timer := prometheus.NewTimer(c.fetchLatency.WithLabelValues(cacheTypeSeries, tenant)) defer timer.ObserveDuration() keys := make([]string, 0, len(ids)) @@ -194,7 +189,7 @@ func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.UL } // Fetch the keys from memcached in a single request. 
- c.seriesRequests.Add(float64(len(ids))) + c.requestTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(len(ids))) results := c.memcached.GetMulti(ctx, keys) if len(results) == 0 { return nil, ids @@ -214,8 +209,7 @@ func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.UL hits[id] = value } - - c.seriesHits.Add(float64(len(hits))) + c.hitsTotal.WithLabelValues(cacheTypeSeries, tenant).Add(float64(len(hits))) return hits, misses } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go index 2e02836c0c..4fb4a155f9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go @@ -148,6 +148,7 @@ func fetchLazyExpandedPostings( addAllPostings bool, lazyExpandedPostingEnabled bool, lazyExpandedPostingSizeBytes prometheus.Counter, + tenant string, ) (*lazyExpandedPostings, error) { var ( err error @@ -178,7 +179,7 @@ func fetchLazyExpandedPostings( } } - ps, matchers, err := fetchAndExpandPostingGroups(ctx, r, postingGroups, bytesLimiter) + ps, matchers, err := fetchAndExpandPostingGroups(ctx, r, postingGroups, bytesLimiter, tenant) if err != nil { return nil, err } @@ -220,9 +221,9 @@ func keysToFetchFromPostingGroups(postingGroups []*postingGroup) ([]labels.Label return keys, lazyMatchers } -func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, postingGroups []*postingGroup, bytesLimiter BytesLimiter) ([]storage.SeriesRef, []*labels.Matcher, error) { +func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, postingGroups []*postingGroup, bytesLimiter BytesLimiter, tenant string) ([]storage.SeriesRef, []*labels.Matcher, error) { keys, lazyMatchers := keysToFetchFromPostingGroups(postingGroups) - fetchedPostings, closeFns, err := r.fetchPostings(ctx, keys, bytesLimiter) + fetchedPostings, closeFns, err := r.fetchPostings(ctx, keys, bytesLimiter, tenant) defer func() { for _, closeFn := range closeFns { closeFn() diff --git a/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go b/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go index 13775cf6e1..4a874855fc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tenancy/tenancy.go @@ -24,6 +24,8 @@ const ( DefaultTenantLabel = "tenant_id" // This key is used to pass tenant information using Context. TenantKey contextKey = 0 + // MetricLabel is the label name used for adding tenant information to exported metrics. + MetricLabel = "tenant" ) // Allowed fields in client certificates. 
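[Editor's note on the vendored Thanos changes above] Taken together, these hunks thread a tenant string through every IndexCache call and label the shared thanos_store_index_cache_* metrics with it via tenancy.MetricLabel. The following is a minimal sketch of how a caller might exercise the new API; it is not part of this patch. The helper name, the placeholder label, and the cached bytes are hypothetical, while the NewFilteredIndexCache constructor, the item-type names, and the tenant-aware signatures come straight from the diffs above.

package example

import (
	"context"

	"github.com/oklog/ulid"
	"github.com/prometheus/prometheus/model/labels"

	storecache "github.com/thanos-io/thanos/pkg/store/cache"
	"github.com/thanos-io/thanos/pkg/tenancy"
)

// cachePostingsForTenant is a hypothetical helper. It wraps an existing
// IndexCache so that only the "Postings" item type is cached, then stores one
// entry and fetches it back for the given tenant.
func cachePostingsForTenant(base storecache.IndexCache, blockID ulid.ULID, tenant string) {
	if tenant == "" {
		// Untenanted callers fall back to the default tenant, matching the
		// WithLabelValues(..., tenancy.DefaultTenant) pre-initialization above.
		tenant = tenancy.DefaultTenant
	}

	// "Postings", "Series" and "ExpandedPostings" are the item types accepted
	// by ValidateEnabledItems; an empty list leaves every item type enabled.
	cache := storecache.NewFilteredIndexCache(base, []string{"Postings"})

	key := labels.Label{Name: "job", Value: "api"} // placeholder key
	cache.StorePostings(blockID, key, []byte("posting bytes"), tenant)

	// The delegated fetch feeds the per-tenant request/hit counters with a
	// {tenant="..."} label; Series and ExpandedPostings calls on this wrapper
	// would be no-ops and cache misses respectively.
	hits, misses := cache.FetchMultiPostings(context.Background(), blockID, []labels.Label{key}, tenant)
	_, _ = hits, misses
}

The same filtering is reachable from configuration via the factory change above; presumably an index cache YAML block would carry enabled_items (e.g. enabled_items: [Postings]) alongside the existing type and config keys. Wrapping the concrete caches rather than modifying each one keeps the filtering in a single place, which is why the factory applies FilteredIndexCache only when enabled_items is non-empty.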
diff --git a/vendor/modules.txt b/vendor/modules.txt index b74cc59f1f..7f22de0558 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -902,7 +902,7 @@ github.com/thanos-io/promql-engine/logicalplan github.com/thanos-io/promql-engine/parser github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/worker -# github.com/thanos-io/thanos v0.32.4-0.20231001083734-531cdb1e8ec3 +# github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader From c3e04bd8a7a8d63274ce3f25f04a9e1100a16088 Mon Sep 17 00:00:00 2001 From: Emmanuel Lodovice Date: Sat, 7 Oct 2023 10:16:08 -0700 Subject: [PATCH 09/13] Add AM aggregation group metrics (#5592) Signed-off-by: Emmanuel Lodovice --- CHANGELOG.md | 1 + pkg/alertmanager/alertmanager_metrics.go | 14 ++++++++++++ pkg/alertmanager/alertmanager_metrics_test.go | 22 +++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b91b4c0b6d..5233a07d3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ * [FEATURE] Query Frontend: Add `-frontend.retry-on-too-many-outstanding-requests` to re-enqueue 429 requests if there are multiple query-schedulers available. #5496 * [FEATURE] Store Gateway: Add `-blocks-storage.bucket-store.max-inflight-requests` for store gateways to reject further requests upon reaching the limit. #5553 * [FEATURE] Store Gateway: Add `cortex_bucket_store_block_load_duration_seconds` histogram to track time to load blocks. #5580 +* [FEATURE] AlertManager: Add `cortex_alertmanager_dispatcher_aggregation_groups` and `cortex_alertmanager_dispatcher_alert_processing_duration_seconds` metrics for dispatcher. #5592 * [ENHANCEMENT] Distributor/Ingester: Add span on push path #5319 * [ENHANCEMENT] Support object storage backends for runtime configuration file. #5292 * [ENHANCEMENT] Query Frontend: Reject subquery with too small step size. 
#5323 diff --git a/pkg/alertmanager/alertmanager_metrics.go b/pkg/alertmanager/alertmanager_metrics.go index dbedd9bca4..21d77d2b4b 100644 --- a/pkg/alertmanager/alertmanager_metrics.go +++ b/pkg/alertmanager/alertmanager_metrics.go @@ -60,6 +60,8 @@ type alertmanagerMetrics struct { persistFailed *prometheus.Desc notificationRateLimited *prometheus.Desc + dispatcherAggregationGroups *prometheus.Desc + dispatcherProcessingDuration *prometheus.Desc dispatcherAggregationGroupsLimitReached *prometheus.Desc insertAlertFailures *prometheus.Desc alertsLimiterAlertsCount *prometheus.Desc @@ -217,6 +219,14 @@ func newAlertmanagerMetrics() *alertmanagerMetrics { "cortex_alertmanager_dispatcher_aggregation_group_limit_reached_total", "Number of times when dispatcher failed to create new aggregation group due to limit.", []string{"user"}, nil), + dispatcherAggregationGroups: prometheus.NewDesc( + "cortex_alertmanager_dispatcher_aggregation_groups", + "Number of active aggregation groups.", + []string{"user"}, nil), + dispatcherProcessingDuration: prometheus.NewDesc( + "cortex_alertmanager_dispatcher_alert_processing_duration_seconds", + "Summary of latencies for the processing of alerts.", + []string{"user"}, nil), insertAlertFailures: prometheus.NewDesc( "cortex_alertmanager_alerts_insert_limited_total", "Total number of failures to store alert due to hitting alertmanager limits.", @@ -279,6 +289,8 @@ func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.persistTotal out <- m.persistFailed out <- m.notificationRateLimited + out <- m.dispatcherAggregationGroups + out <- m.dispatcherProcessingDuration out <- m.dispatcherAggregationGroupsLimitReached out <- m.insertAlertFailures out <- m.alertsLimiterAlertsCount @@ -330,6 +342,8 @@ func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCounters(out, m.persistFailed, "alertmanager_state_persist_failed_total") data.SendSumOfCountersPerUserWithLabels(out, m.notificationRateLimited, "alertmanager_notification_rate_limited_total", "integration") + data.SendSumOfGaugesPerUser(out, m.dispatcherAggregationGroups, "alertmanager_dispatcher_aggregation_groups") + data.SendSumOfSummariesPerUser(out, m.dispatcherProcessingDuration, "alertmanager_dispatcher_alert_processing_duration_seconds") data.SendSumOfCountersPerUser(out, m.dispatcherAggregationGroupsLimitReached, "alertmanager_dispatcher_aggregation_group_limit_reached_total") data.SendSumOfCountersPerUser(out, m.insertAlertFailures, "alertmanager_alerts_insert_limited_total") data.SendSumOfGaugesPerUser(out, m.alertsLimiterAlertsCount, "alertmanager_alerts_limiter_current_alerts") diff --git a/pkg/alertmanager/alertmanager_metrics_test.go b/pkg/alertmanager/alertmanager_metrics_test.go index dac8753c20..25cad5f344 100644 --- a/pkg/alertmanager/alertmanager_metrics_test.go +++ b/pkg/alertmanager/alertmanager_metrics_test.go @@ -60,6 +60,14 @@ func TestAlertmanagerMetricsStore(t *testing.T) { cortex_alertmanager_config_hash{user="user1"} 0 cortex_alertmanager_config_hash{user="user2"} 0 cortex_alertmanager_config_hash{user="user3"} 0 + # HELP cortex_alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts. 
+ # TYPE cortex_alertmanager_dispatcher_alert_processing_duration_seconds summary + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user2"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user2"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user3"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user3"} 0 # HELP cortex_alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle. # TYPE cortex_alertmanager_nflog_gc_duration_seconds summary cortex_alertmanager_nflog_gc_duration_seconds_sum 111 @@ -354,6 +362,14 @@ func TestAlertmanagerMetricsRemoval(t *testing.T) { cortex_alertmanager_config_hash{user="user1"} 0 cortex_alertmanager_config_hash{user="user2"} 0 cortex_alertmanager_config_hash{user="user3"} 0 + # HELP cortex_alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts. + # TYPE cortex_alertmanager_dispatcher_alert_processing_duration_seconds summary + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user2"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user2"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user3"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user3"} 0 # HELP cortex_alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle. # TYPE cortex_alertmanager_nflog_gc_duration_seconds summary @@ -649,6 +665,12 @@ func TestAlertmanagerMetricsRemoval(t *testing.T) { # TYPE cortex_alertmanager_config_hash gauge cortex_alertmanager_config_hash{user="user1"} 0 cortex_alertmanager_config_hash{user="user2"} 0 + # HELP cortex_alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts. + # TYPE cortex_alertmanager_dispatcher_alert_processing_duration_seconds summary + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user1"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_sum{user="user2"} 0 + cortex_alertmanager_dispatcher_alert_processing_duration_seconds_count{user="user2"} 0 # HELP cortex_alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle. # TYPE cortex_alertmanager_nflog_gc_duration_seconds summary From 424625f7c5c8f18b6ec73acbbcf7693bf8e0225e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Oct 2023 10:17:15 -0700 Subject: [PATCH 10/13] Bump postcss from 7.0.39 to 8.4.31 in /website (#5590) Bumps [postcss](https://github.com/postcss/postcss) from 7.0.39 to 8.4.31. 
- [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/7.0.39...8.4.31) --- updated-dependencies: - dependency-name: postcss dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- website/package-lock.json | 81 +++++++++++++++++++++++++++++++++++---- website/package.json | 2 +- 2 files changed, 74 insertions(+), 9 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 8da97da728..5d285ac0c6 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -89,6 +89,24 @@ "num2fraction": "^1.2.2", "postcss": "^7.0.32", "postcss-value-parser": "^4.1.0" + }, + "dependencies": { + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==", + "dev": true + }, + "postcss": { + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "dev": true, + "requires": { + "picocolors": "^0.2.1", + "source-map": "^0.6.1" + } + } } }, "binary-extensions": { @@ -572,6 +590,12 @@ "picomatch": "^2.3.1" } }, + "nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "dev": true + }, "normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -637,9 +661,9 @@ "dev": true }, "picocolors": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", - "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", "dev": true }, "picomatch": { @@ -655,13 +679,14 @@ "dev": true }, "postcss": { - "version": "7.0.39", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", - "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, "requires": { - "picocolors": "^0.2.1", - "source-map": "^0.6.1" + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" } }, "postcss-cli": { @@ -724,6 +749,22 @@ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true }, + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==", + "dev": true + }, + "postcss": { + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": 
"sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "dev": true, + "requires": { + "picocolors": "^0.2.1", + "source-map": "^0.6.1" + } + }, "supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -755,6 +796,24 @@ "lodash": "^4.17.11", "log-symbols": "^2.2.0", "postcss": "^7.0.7" + }, + "dependencies": { + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==", + "dev": true + }, + "postcss": { + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", + "dev": true, + "requires": { + "picocolors": "^0.2.1", + "source-map": "^0.6.1" + } + } } }, "postcss-value-parser": { @@ -844,6 +903,12 @@ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true }, + "source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true + }, "sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", diff --git a/website/package.json b/website/package.json index b0cedfa9fe..1b08b0ae98 100644 --- a/website/package.json +++ b/website/package.json @@ -20,6 +20,6 @@ "devDependencies": { "autoprefixer": "^9.8.5", "postcss-cli": "^7.1.2", - "postcss": "^7.0.39" + "postcss": "^8.4.31" } } From f16bb49b1b400c88ca8630adeb3b2f6e62edb8fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 11:02:09 -0700 Subject: [PATCH 11/13] Bump github.com/aws/aws-sdk-go from 1.44.327 to 1.45.24 (#5595) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.327 to 1.45.24. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.327...v1.45.24) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- .../aws/corehandlers/awsinternal.go | 4 + .../aws-sdk-go/aws/corehandlers/user_agent.go | 10 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 1 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1614 ++++++++++++++--- .../aws-sdk-go/aws/session/shared_config.go | 25 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/internal/ini/literal_tokens.go | 57 +- .../aws/aws-sdk-go/internal/ini/visitor.go | 6 +- .../aws/aws-sdk-go/service/dynamodb/api.go | 149 +- vendor/modules.txt | 2 +- 12 files changed, 1573 insertions(+), 303 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go diff --git a/go.mod b/go.mod index 0136803094..5cfabb7ef9 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alicebob/miniredis/v2 v2.30.4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.327 + github.com/aws/aws-sdk-go v1.45.24 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/cortexproject/promqlsmith v0.0.0-20230502194647-ed3e43bb7a52 diff --git a/go.sum b/go.sum index caf94052e2..dc95c58bfd 100644 --- a/go.sum +++ b/go.sum @@ -456,8 +456,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY= -github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo= +github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go new file mode 100644 index 0000000000..140242dd1b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go @@ -0,0 +1,4 @@ +// DO NOT EDIT +package corehandlers + +const isAwsInternal = "" \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index ab69c7a6f3..ac842c55d8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{ request.AddToUserAgent(r, execEnvUAKey+"/"+v) }, } + +var AddAwsInternal = request.NamedHandler{ + Name: "core.AddAwsInternal", + Fn: func(r *request.Request) { + if len(isAwsInternal) == 0 { + return + } + request.AddToUserAgent(r, isAwsInternal) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go 
b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 23bb639e01..e39903284d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -74,6 +74,7 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddAwsInternal) handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index a515362a1a..1a2513b2ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1058,6 +1058,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2569,21 +2572,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -2928,6 +2991,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.il-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3670,6 +3742,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + 
}: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4053,6 +4134,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4334,6 +4418,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4381,6 +4468,118 @@ var awsPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "bedrock-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "billingconductor": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -5684,6 +5883,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6256,122 +6458,131 @@ var awsPartition = partition{ Region: "ap-northeast-2", 
}: endpoint{}, endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", }: endpoint{}, endpointKey{ Region: "ap-south-1", @@ -6439,6 +6650,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7072,6 +7286,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -7167,6 +7384,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7197,12 +7417,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7212,6 +7438,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7897,6 +8129,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7944,6 +8179,185 @@ var awsPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + 
endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -8737,6 +9151,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: 
"il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10978,6 +11395,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10996,6 +11419,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11005,6 +11437,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -11014,6 +11464,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11032,9 +11485,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11337,63 +11802,183 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, endpointKey{ Region: "fips", }: endpoint{ @@ -11406,18 +11991,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -11436,6 +12051,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11454,6 +12075,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + 
}: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -11472,6 +12099,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -12188,6 +12821,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12585,6 +13221,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13483,6 +14122,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13760,6 +14402,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13775,6 +14420,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14019,6 +14667,9 @@ var awsPartition = partition{ }, "inspector2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -14028,6 +14679,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -14037,12 +14691,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14215,7 +14875,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.ca-central-1.api.aws", + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", }, endpointKey{ Region: "eu-central-1", @@ -14286,7 +14946,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-1.api.aws", + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -14297,7 +14957,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-2.api.aws", + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -14308,7 +14968,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-1.api.aws", + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -14319,7 +14979,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-2.api.aws", + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", }, }, }, @@ -15448,6 +16108,9 @@ var 
awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17084,6 +17747,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17093,12 +17759,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -17150,6 +17822,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17244,142 +17922,154 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", - }, - }, - }, - "license-manager-user-subscriptions": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17425,6 +18115,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17964,6 +18657,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -18030,6 +18726,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18155,6 +18858,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18164,6 +18870,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18554,12 +19263,33 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -18995,6 +19725,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19781,6 +20514,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20075,6 +20811,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -20115,6 +20859,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -20217,6 +20969,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -20500,6 +21260,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20892,6 +21658,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20907,12 +21676,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21159,6 +21934,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -21199,6 +21982,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -22525,6 +23316,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "rekognition-fips.ca-central-1", }: endpoint{ @@ -22846,6 +23640,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.ap-southeast-2.api.aws", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{ @@ -22891,6 +23690,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.il-central-1.api.aws", }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -24629,6 +25433,9 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -26075,6 +26882,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27673,6 +28483,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -27688,6 +28501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -27933,6 +28749,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28808,15 +29627,30 @@ var awsPartition = partition{ }, "tnb": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, 
+ endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29239,6 +30073,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -31747,6 +32584,20 @@ var awscnPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32164,6 +33015,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32406,9 +33282,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "events": service{ @@ -32566,6 +33454,16 @@ var awscnPartition = partition{ }, }, }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32865,6 +33763,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -32903,6 +33821,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33334,6 
+34272,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34853,6 +35801,13 @@ var awsusgovPartition = partition{ }, }, }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35196,6 +36151,31 @@ var awsusgovPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35864,6 +36844,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -35882,6 +36868,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -36118,6 +37110,28 @@ var awsusgovPartition = partition{ }, }, }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37192,6 +38206,36 @@ var awsusgovPartition = partition{ }, }, }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -38069,6 +39113,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -40138,14 +41192,45 @@ var awsisoPartition = partition{ }, "elasticmapreduce": service{ Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, }, }, "es": service{ @@ -40931,9 +42016,24 @@ var awsisobPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "es": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index ea3ac0d031..8f1388f9fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -389,8 +389,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.Region, section, regionKey) updateString(&cfg.CustomCABundle, section, customCABundleKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for (aws-sdk-go-v2/#2276): + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + var d time.Duration + if v, ok := section.Int(roleDurationSecondsKey); ok { + d = time.Duration(v) * time.Second + } cfg.AssumeRoleDuration = &d } @@ -668,7 +675,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -677,8 +687,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // SharedConfigLoadError is an error for the shared config file failed to load. 
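The comma-ok shape this vendor bump threads through the ini accessors (Section.Int, Section.Bool, and Section.Float64 now return a value plus a presence/parse flag, per the visitor.go and literal_tokens.go hunks below) is easiest to see in isolation. The following is a minimal, self-contained sketch: the section type and intValue helper are hypothetical stand-ins for the SDK's internal ini package (which cannot be imported from outside the module), but the defaulting behavior mirrors the quirk documented in the setFromIniFile hunk above — a missing key leaves the destination unset, while a present but non-numeric value resolves to zero.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// section is a hypothetical stand-in for ini.Section.
type section map[string]string

// intValue mirrors the new Section.Int shape: (value, ok) instead of a bare
// value. A present but unparsable key yields (0, false).
func (s section) intValue(key string) (int64, bool) {
	raw, present := s[key]
	if !present {
		return 0, false
	}
	v, err := strconv.ParseInt(raw, 0, 64)
	if err != nil {
		return 0, false
	}
	return v, true
}

func main() {
	s := section{"duration_seconds": "900", "bad_duration": "15m"}

	for _, key := range []string{"duration_seconds", "bad_duration", "absent"} {
		var d *time.Duration
		if _, present := s[key]; present { // analogue of section.Has(key)
			v, _ := s.intValue(key) // non-numeric: v stays 0, field is still set
			dur := time.Duration(v) * time.Second
			d = &dur
		}
		if d == nil {
			fmt.Printf("%s -> unset\n", key)
		} else {
			fmt.Printf("%s -> %v\n", key, *d)
		}
	}
}

Running this prints "duration_seconds -> 15m0s", "bad_duration -> 0s", and "absent -> unset", which is exactly the pre-(aws-sdk-go-v2#2276) behavior the comments in this patch call out as intentionally retained.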
@@ -805,7 +818,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.DualStackEndpointStateEnabled } else { *dst = endpoints.DualStackEndpointStateDisabled @@ -821,7 +835,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.FIPSEndpointStateEnabled } else { *dst = endpoints.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 39522c2609..1200b87ce6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.327" +const SDKVersion = "1.45.24" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 34a481afbd..b1b686086a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -154,11 +154,11 @@ func (v ValueType) String() string { // ValueType enums const ( NoneType = ValueType(iota) - DecimalType - IntegerType + DecimalType // deprecated + IntegerType // deprecated StringType QuotedStringType - BoolType + BoolType // deprecated ) // Value is a union container @@ -166,9 +166,9 @@ type Value struct { Type ValueType raw []rune - integer int64 - decimal float64 - boolean bool + integer int64 // deprecated + decimal float64 // deprecated + boolean bool // deprecated str string } @@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) { } token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) } else { n, err = getValue(b) token = newToken(TokenLit, b[:n], StringType) @@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) { } // IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true } // FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + if err != nil { + return 0, false + } + return f, true } // BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false } func 
isTrimmable(r rune) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go index 081cf43342..1d08e138ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index a250c7622d..58b2ecfbee 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -3991,9 +3991,10 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // ListBackups API operation for Amazon DynamoDB. // -// List backups associated with an Amazon Web Services account. To list backups -// for a given table, specify TableName. ListBackups returns a paginated list -// of results with at most 1 MB worth of items in a page. You can also specify +// List DynamoDB backups that are associated with an Amazon Web Services account +// and weren't made with Amazon Web Services Backup. To list these backups for +// a given table, specify TableName. ListBackups returns a paginated list of +// results with at most 1 MB worth of items in a page. You can also specify // a maximum number of entries to be returned in a page. // // In the request, start time is inclusive, but end time is exclusive. Note @@ -4001,6 +4002,9 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // // You can call ListBackups a maximum of five times per second. // +// If you want to retrieve the complete list of backups made with Amazon Web +// Services Backup, use the Amazon Web Services Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -14214,12 +14218,20 @@ type ExportDescription struct { // Point in time from which table data was exported. ExportTime *time.Time `type:"timestamp"` + // Choice of whether to execute as a full export or incremental export. Valid + // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, + // the IncrementalExportSpecification must also be used. + ExportType *string `type:"string" enum:"ExportType"` + // Status code for the result of the failed export. FailureCode *string `type:"string"` // Export failure reason description. FailureMessage *string `type:"string"` + // Optional object containing the parameters specific to an incremental export. + IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"` + // The number of items exported. 
ItemCount *int64 `type:"long"` @@ -14322,6 +14334,12 @@ func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { return s } +// SetExportType sets the ExportType field's value. +func (s *ExportDescription) SetExportType(v string) *ExportDescription { + s.ExportType = &v + return s +} + // SetFailureCode sets the FailureCode field's value. func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { s.FailureCode = &v @@ -14334,6 +14352,12 @@ func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { return s } +// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. +func (s *ExportDescription) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportDescription { + s.IncrementalExportSpecification = v + return s +} + // SetItemCount sets the ItemCount field's value. func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { s.ItemCount = &v @@ -14462,6 +14486,11 @@ type ExportSummary struct { // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or // FAILED. ExportStatus *string `type:"string" enum:"ExportStatus"` + + // Choice of whether to execute as a full export or incremental export. Valid + // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, + // the IncrementalExportSpecification must also be used. + ExportType *string `type:"string" enum:"ExportType"` } // String returns the string representation. @@ -14494,6 +14523,12 @@ func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { return s } +// SetExportType sets the ExportType field's value. +func (s *ExportSummary) SetExportType(v string) *ExportSummary { + s.ExportType = &v + return s +} + type ExportTableToPointInTimeInput struct { _ struct{} `type:"structure"` @@ -14519,6 +14554,14 @@ type ExportTableToPointInTimeInput struct { // state at this point in time. ExportTime *time.Time `type:"timestamp"` + // Choice of whether to execute as a full export or incremental export. Valid + // values are FULL_EXPORT or INCREMENTAL_EXPORT. If INCREMENTAL_EXPORT is provided, + // the IncrementalExportSpecification must also be used. + ExportType *string `type:"string" enum:"ExportType"` + + // Optional object containing the parameters specific to an incremental export. + IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"` + // The name of the Amazon S3 bucket to export the snapshot to. // // S3Bucket is a required field @@ -14605,6 +14648,18 @@ func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableT return s } +// SetExportType sets the ExportType field's value. +func (s *ExportTableToPointInTimeInput) SetExportType(v string) *ExportTableToPointInTimeInput { + s.ExportType = &v + return s +} + +// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. +func (s *ExportTableToPointInTimeInput) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportTableToPointInTimeInput { + s.IncrementalExportSpecification = v + return s +} + // SetS3Bucket sets the S3Bucket field's value. func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { s.S3Bucket = &v @@ -16522,6 +16577,62 @@ func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) return s } +// Optional object containing the parameters specific to an incremental export. 
+type IncrementalExportSpecification struct { + _ struct{} `type:"structure"` + + // Time in the past which provides the inclusive start range for the export + // table's data, counted in seconds from the start of the Unix epoch. The incremental + // export will reflect the table's state including and after this point in time. + ExportFromTime *time.Time `type:"timestamp"` + + // Time in the past which provides the exclusive end range for the export table's + // data, counted in seconds from the start of the Unix epoch. The incremental + // export will reflect the table's state just prior to this point in time. If + // this is not provided, the latest time with data available will be used. + ExportToTime *time.Time `type:"timestamp"` + + // Choice of whether to output the previous item image prior to the start time + // of the incremental export. Valid values are NEW_AND_OLD_IMAGES and NEW_IMAGES. + ExportViewType *string `type:"string" enum:"ExportViewType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IncrementalExportSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IncrementalExportSpecification) GoString() string { + return s.String() +} + +// SetExportFromTime sets the ExportFromTime field's value. +func (s *IncrementalExportSpecification) SetExportFromTime(v time.Time) *IncrementalExportSpecification { + s.ExportFromTime = &v + return s +} + +// SetExportToTime sets the ExportToTime field's value. +func (s *IncrementalExportSpecification) SetExportToTime(v time.Time) *IncrementalExportSpecification { + s.ExportToTime = &v + return s +} + +// SetExportViewType sets the ExportViewType field's value. +func (s *IncrementalExportSpecification) SetExportViewType(v string) *IncrementalExportSpecification { + s.ExportViewType = &v + return s +} + // The operation tried to access a nonexistent index. 
type IndexNotFoundException struct { _ struct{} `type:"structure"` @@ -26926,6 +27037,38 @@ func ExportStatus_Values() []string { } } +const ( + // ExportTypeFullExport is a ExportType enum value + ExportTypeFullExport = "FULL_EXPORT" + + // ExportTypeIncrementalExport is a ExportType enum value + ExportTypeIncrementalExport = "INCREMENTAL_EXPORT" +) + +// ExportType_Values returns all elements of the ExportType enum +func ExportType_Values() []string { + return []string{ + ExportTypeFullExport, + ExportTypeIncrementalExport, + } +} + +const ( + // ExportViewTypeNewImage is a ExportViewType enum value + ExportViewTypeNewImage = "NEW_IMAGE" + + // ExportViewTypeNewAndOldImages is a ExportViewType enum value + ExportViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" +) + +// ExportViewType_Values returns all elements of the ExportViewType enum +func ExportViewType_Values() []string { + return []string{ + ExportViewTypeNewImage, + ExportViewTypeNewAndOldImages, + } +} + const ( // GlobalTableStatusCreating is a GlobalTableStatus enum value GlobalTableStatusCreating = "CREATING" diff --git a/vendor/modules.txt b/vendor/modules.txt index 7f22de0558..d3364622da 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -117,7 +117,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.44.327 +# github.com/aws/aws-sdk-go v1.45.24 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer From dbe4fefd9196314747a713c9fbfe8bfa90b701b6 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 10 Oct 2023 00:22:53 -0700 Subject: [PATCH 12/13] Fix log message missing key field (#5594) * fix ddb kv log message Signed-off-by: Ben Ye * more logging fix Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- pkg/ring/kv/dynamodb/client.go | 2 +- pkg/tracing/tracing.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/ring/kv/dynamodb/client.go b/pkg/ring/kv/dynamodb/client.go index 070a911e28..b722a2d1ad 100644 --- a/pkg/ring/kv/dynamodb/client.go +++ b/pkg/ring/kv/dynamodb/client.go @@ -78,7 +78,7 @@ func NewClient(cfg Config, cc codec.Codec, logger log.Logger, registerer prometh staleData: make(map[string]staleData), backoffConfig: backoffConfig, } - level.Info(c.logger).Log("dynamodb kv initialized") + level.Info(c.logger).Log("msg", "dynamodb kv initialized") return c, nil } diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index 33f90ee5bd..8c92ac2fba 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -65,7 +65,7 @@ func (c *Config) Validate() error { return errors.New("otlp-endpoint must be defined when using otel exporter") } if len(c.Otel.OltpEndpoint) > 0 { - level.Warn(util_log.Logger).Log("DEPRECATED: otel.oltp-endpoint is deprecated. User otel.otlp-endpoint instead.") + level.Warn(util_log.Logger).Log("msg", "DEPRECATED: otel.oltp-endpoint is deprecated. 
Use otel.otlp-endpoint instead.") } } From e81ee1de9a60530b66d7ddb0eab1a51c3fef95df Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Wed, 11 Oct 2023 00:54:42 -0700 Subject: [PATCH 13/13] Upgrade prometheus to latest main (#5593) --- go.mod | 11 +- go.sum | 34 +- pkg/compactor/compactor_test.go | 2 +- pkg/cortex/modules_test.go | 2 +- pkg/ingester/ingester.go | 38 +- pkg/ingester/ingester_test.go | 6 +- pkg/querier/block.go | 11 +- pkg/querier/blocks_store_queryable.go | 109 +- pkg/querier/blocks_store_queryable_test.go | 20 +- pkg/querier/distributor_queryable.go | 25 +- pkg/querier/distributor_queryable_test.go | 22 +- pkg/querier/duplicates_test.go | 9 +- pkg/querier/error_translate_queryable.go | 41 +- pkg/querier/error_translate_queryable_test.go | 13 +- pkg/querier/lazyquery/lazyquery.go | 17 +- pkg/querier/querier.go | 199 +- pkg/querier/querier_test.go | 43 +- pkg/querier/remote_read.go | 4 +- pkg/querier/remote_read_test.go | 9 +- pkg/querier/series/series_set.go | 22 +- .../tenantfederation/merge_queryable.go | 145 +- .../tenantfederation/merge_queryable_test.go | 188 +- pkg/querier/timeseries_series_set.go | 9 +- pkg/ruler/ruler_test.go | 17 +- pkg/storage/tsdb/index_cache.go | 9 +- .../bucket_store_inmemory_server.go | 6 +- pkg/storegateway/bucket_stores.go | 1 + pkg/storegateway/bucket_stores_test.go | 10 +- pkg/storegateway/gateway_test.go | 5 +- pkg/util/concurrency/runner.go | 2 +- vendor/github.com/hashicorp/consul/api/acl.go | 45 + .../github.com/hashicorp/consul/api/agent.go | 7 + vendor/github.com/hashicorp/consul/api/api.go | 13 + .../hashicorp/consul/api/config_entry.go | 1 - .../consul/api/config_entry_jwt_provider.go | 73 + .../github.com/hashicorp/consul/api/health.go | 1 + .../hashicorp/consul/api/operator_audit.go | 40 + .../hashicorp/consul/api/operator_raft.go | 3 + .../hashicorp/consul/api/operator_usage.go | 1 + .../hashicorp/consul/api/prepared_query.go | 4 +- .../prometheus/prometheus/config/config.go | 8 + .../prometheus/discovery/manager.go | 8 +- .../model/histogram/float_histogram.go | 333 ++-- .../prometheus/model/labels/labels.go | 9 +- .../model/labels/labels_stringlabels.go | 7 +- .../prometheus/model/rulefmt/rulefmt.go | 16 +- .../model/textparse/openmetricsparse.go | 6 +- .../prometheus/model/textparse/promparse.go | 4 +- .../model/textparse/protobufparse.go | 12 +- .../prometheus/notifier/notifier.go | 12 + .../prometheus/prometheus/promql/engine.go | 175 +- .../prometheus/prometheus/promql/functions.go | 570 +++--- .../prometheus/promql/parser/ast.go | 78 +- .../prometheus/promql/parser/functions.go | 10 + .../promql/parser/generated_parser.y | 205 +- .../promql/parser/generated_parser.y.go | 1277 +++++++----- .../prometheus/promql/parser/lex.go | 186 +- .../prometheus/promql/parser/parse.go | 218 ++- .../promql/parser/posrange/posrange.go | 54 + .../prometheus/prometheus/promql/quantile.go | 70 +- .../prometheus/prometheus/promql/test.go | 103 +- .../promql/testdata/native_histograms.test | 226 +++ .../prometheus/prometheus/promql/value.go | 4 +- .../prometheus/prometheus/rules/alerting.go | 4 +- .../prometheus/prometheus/rules/manager.go | 25 +- .../prometheus/prometheus/scrape/scrape.go | 49 +- .../prometheus/prometheus/storage/buffer.go | 39 +- .../prometheus/prometheus/storage/fanout.go | 12 +- .../prometheus/prometheus/storage/generic.go | 23 +- .../prometheus/storage/interface.go | 57 +- .../prometheus/prometheus/storage/lazy.go | 24 +- .../prometheus/prometheus/storage/merge.go | 52 +- .../prometheus/prometheus/storage/noop.go | 19 +- 
.../storage/remote/azuread/azuread.go | 116 +- .../prometheus/storage/remote/client.go | 12 +- .../prometheus/storage/remote/codec.go | 13 +- .../prometheusremotewrite/helper.go | 9 +- .../storage/remote/queue_manager.go | 6 +- .../prometheus/storage/remote/read.go | 20 +- .../prometheus/storage/remote/read_handler.go | 16 +- .../prometheus/storage/remote/storage.go | 8 +- .../prometheus/storage/secondary.go | 20 +- .../prometheus/prometheus/storage/series.go | 23 +- .../prometheus/prometheus/tsdb/block.go | 51 +- .../prometheus/tsdb/chunkenc/chunk.go | 14 + .../tsdb/chunkenc/float_histogram.go | 33 +- .../prometheus/tsdb/chunkenc/histogram.go | 33 +- .../tsdb/chunkenc/histogram_meta.go | 11 +- .../prometheus/tsdb/chunks/chunks.go | 95 + .../prometheus/tsdb/chunks/samples.go | 89 + .../prometheus/prometheus/tsdb/compact.go | 10 +- .../prometheus/prometheus/tsdb/db.go | 53 +- .../prometheus/prometheus/tsdb/exemplar.go | 4 +- .../prometheus/prometheus/tsdb/head.go | 21 +- .../prometheus/prometheus/tsdb/head_append.go | 117 +- .../prometheus/prometheus/tsdb/head_read.go | 29 +- .../prometheus/prometheus/tsdb/head_wal.go | 76 +- .../prometheus/prometheus/tsdb/index/index.go | 59 +- .../prometheus/tsdb/index/postings.go | 24 +- .../prometheus/tsdb/index/postingsstats.go | 4 +- .../prometheus/tsdb/ooo_head_read.go | 43 +- .../prometheus/prometheus/tsdb/querier.go | 121 +- .../prometheus/tsdb/tsdbutil/chunks.go | 159 -- .../prometheus/tsdb/tsdbutil/histogram.go | 12 +- .../prometheus/tsdb/wlog/checkpoint.go | 4 +- .../prometheus/prometheus/tsdb/wlog/wlog.go | 4 +- .../util/annotations/annotations.go | 175 ++ .../prometheus/prometheus/util/stats/timer.go | 4 +- .../prometheus/prometheus/web/api/v1/api.go | 53 +- .../thanos-io/promql-engine/engine/engine.go | 46 +- .../execution/aggregate/hashaggregate.go | 3 +- .../execution/aggregate/khashaggregate.go | 3 +- .../execution/aggregate/scalar_table.go | 3 +- .../execution/aggregate/vector_table.go | 3 +- .../promql-engine/execution/binary/index.go | 35 - .../promql-engine/execution/binary/scalar.go | 2 +- .../execution/binary/{table.go => utils.go} | 141 +- .../promql-engine/execution/binary/vector.go | 633 +++--- .../promql-engine/execution/execution.go | 5 +- .../execution/function/absent.go | 3 +- .../execution/function/functions.go | 38 + .../execution/function/histogram.go | 3 +- .../promql-engine/execution/function/noarg.go | 3 +- .../execution/function/operator.go | 7 +- .../execution/function/relabel.go | 3 +- .../promql-engine/execution/parse/errors.go | 3 +- .../execution/parse/functions.go | 9 +- .../promql-engine/execution/scan/functions.go | 92 +- .../execution/scan/matrix_selector.go | 44 +- .../promql-engine/execution/scan/subquery.go | 19 +- .../step_invariant/step_invariant.go | 5 +- .../execution/storage/series_selector.go | 4 +- .../execution/warnings/context.go | 18 +- .../promql-engine/extlabels/labels.go | 19 - .../promql-engine/logicalplan/distribute.go | 10 +- .../promql-engine/logicalplan/filter.go | 5 +- .../logicalplan/merge_selects.go | 3 +- .../promql-engine/logicalplan/passthrough.go | 3 +- .../promql-engine/logicalplan/plan.go | 3 +- .../logicalplan/propagate_selectors.go | 3 +- .../logicalplan/sort_matchers.go | 3 +- .../promql-engine/logicalplan/trim_sorts.go | 3 +- .../promql-engine/logicalplan/user_defined.go | 3 +- .../thanos-io/promql-engine/parser/ast.go | 494 ----- .../promql-engine/parser/functions.go | 393 ---- .../promql-engine/parser/generated_parser.y | 749 ------- .../parser/generated_parser.y.go | 1725 
----------------- .../thanos-io/promql-engine/parser/lex.go | 846 -------- .../thanos-io/promql-engine/parser/parse.go | 807 -------- .../promql-engine/parser/prettier.go | 166 -- .../promql-engine/parser/prettier_rules.md | 16 - .../thanos-io/promql-engine/parser/printer.go | 229 --- .../thanos-io/promql-engine/parser/value.go | 45 - .../thanos-io/promql-engine/query/options.go | 2 +- .../thanos-io/thanos/pkg/block/index.go | 21 +- .../pkg/block/indexheader/binary_reader.go | 20 +- .../thanos/pkg/block/indexheader/header.go | 3 +- .../block/indexheader/lazy_binary_reader.go | 4 +- .../pkg/block/indexheader/parallel_bucket.go | 12 +- .../thanos-io/thanos/pkg/compact/compact.go | 8 +- .../pkg/compact/downsample/downsample.go | 7 +- .../thanos-io/thanos/pkg/dedup/iter.go | 4 +- .../thanos/pkg/promclient/promclient.go | 2 + .../thanos-io/thanos/pkg/store/bucket.go | 67 +- .../thanos/pkg/store/cache/factory.go | 12 +- .../thanos/pkg/store/cache/inmemory.go | 15 +- .../thanos/pkg/store/cache/memcached.go | 13 +- .../thanos-io/thanos/pkg/store/flushable.go | 4 +- .../thanos/pkg/store/lazy_postings.go | 11 +- .../thanos-io/thanos/pkg/store/tsdb.go | 12 +- .../thanos/pkg/testutil/e2eutil/prometheus.go | 9 +- .../github.com/zhangyunhao116/umap/.gitignore | 5 + vendor/github.com/zhangyunhao116/umap/LICENSE | 29 + .../github.com/zhangyunhao116/umap/bithack.go | 93 + vendor/github.com/zhangyunhao116/umap/hash.go | 11 + vendor/github.com/zhangyunhao116/umap/map.go | 414 ++++ .../github.com/zhangyunhao116/umap/readme.md | 162 ++ vendor/github.com/zhangyunhao116/umap/util.go | 30 + vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 353 +++- vendor/golang.org/x/exp/slices/sort.go | 115 +- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +- .../golang.org/x/exp/slices/zsortordered.go | 34 +- vendor/modules.txt | 18 +- 184 files changed, 6406 insertions(+), 8585 deletions(-) create mode 100644 vendor/github.com/hashicorp/consul/api/operator_audit.go create mode 100644 vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go create mode 100644 vendor/github.com/prometheus/prometheus/promql/testdata/native_histograms.test create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunks/samples.go delete mode 100644 vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go create mode 100644 vendor/github.com/prometheus/prometheus/util/annotations/annotations.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/execution/binary/index.go rename vendor/github.com/thanos-io/promql-engine/execution/binary/{table.go => utils.go} (57%) delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/ast.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/functions.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/generated_parser.y delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/generated_parser.y.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/lex.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/parse.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/prettier.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/prettier_rules.md delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/printer.go delete mode 100644 vendor/github.com/thanos-io/promql-engine/parser/value.go create mode 100644 vendor/github.com/zhangyunhao116/umap/.gitignore create mode 100644 
vendor/github.com/zhangyunhao116/umap/LICENSE create mode 100644 vendor/github.com/zhangyunhao116/umap/bithack.go create mode 100644 vendor/github.com/zhangyunhao116/umap/hash.go create mode 100644 vendor/github.com/zhangyunhao116/umap/map.go create mode 100644 vendor/github.com/zhangyunhao116/umap/readme.md create mode 100644 vendor/github.com/zhangyunhao116/umap/util.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go rename vendor/golang.org/x/exp/slices/{zsortfunc.go => zsortanyfunc.go} (64%) diff --git a/go.mod b/go.mod index 5cfabb7ef9..4937e158ea 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.22.0 + github.com/hashicorp/consul/api v1.25.1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/memberlist v0.5.0 @@ -46,14 +46,14 @@ require ( github.com/prometheus/client_model v0.4.0 github.com/prometheus/common v0.44.0 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.46.1-0.20230818184859-4d8e380269da + github.com/prometheus/prometheus v0.47.2-0.20231009162353-f6d9c84fde6b github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.5 github.com/stretchr/testify v1.8.4 github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed - github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e - github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275 + github.com/thanos-io/promql-engine v0.0.0-20231003153358-8605b6afba51 + github.com/thanos-io/thanos v0.32.5-0.20231010190130-dfe0bbff507b github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d go.etcd.io/etcd/api/v3 v3.5.9 @@ -198,6 +198,7 @@ require ( github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect github.com/weaveworks/promrus v1.2.0 // indirect github.com/yuin/gopher-lua v1.1.0 // indirect + github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 // indirect go.mongodb.org/mongo-driver v1.12.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect @@ -216,7 +217,7 @@ require ( go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/crypto v0.12.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sys v0.12.0 // indirect diff --git a/go.sum b/go.sum index dc95c58bfd..7d408c1db8 100644 --- a/go.sum +++ b/go.sum @@ -762,6 +762,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -829,7 +830,6 @@ github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMd github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gnostic v0.6.9 h1:hNeVzFMdppk7EuvFnJjiowGFBmSau2llc2rseO0+eNw= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= @@ -854,10 +854,10 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= -github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg= +github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= +github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -1134,8 +1134,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.46.1-0.20230818184859-4d8e380269da h1:D5uk+FEdNjQs9ly/wkb/pXkoWc60GcV9RVsMUpg/BIE= -github.com/prometheus/prometheus v0.46.1-0.20230818184859-4d8e380269da/go.mod h1:uvQsz/zwlfb8TRuWjK7L7ofV5ycAYq8dorvNf2iOBN4= +github.com/prometheus/prometheus v0.47.2-0.20231009162353-f6d9c84fde6b h1:oiCf/rFBXXaDLyiK1MnMKYlSA7Xm2+SQePvXnl8bNUI= +github.com/prometheus/prometheus v0.47.2-0.20231009162353-f6d9c84fde6b/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -1210,10 +1210,10 @@ github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1 github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= 
github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed h1:iWQdY3S6DpWjelVvKKSKgS7LeLkhK4VaEnQfphB9ZXA= github.com/thanos-io/objstore v0.0.0-20230921130928-63a603e651ed/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= -github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e h1:kwsFCU8eSkZehbrAN3nXPw5RdMHi/Bok/y8l2C4M+gk= -github.com/thanos-io/promql-engine v0.0.0-20230821193351-e1ae4275b96e/go.mod h1:+T/ZYNCGybT6eTsGGvVtGb63nT1cvUmH6MjqRrcQoKw= -github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275 h1:y2YPqM1XiBw7EhLg45F6A1g8bgt4yYxkaRAeQaNLWYk= -github.com/thanos-io/thanos v0.32.5-0.20231006043659-79bbf34b4275/go.mod h1:HwiHn7u6GeES403BTACOYib/JKAJknf8dByU/uJiEr0= +github.com/thanos-io/promql-engine v0.0.0-20231003153358-8605b6afba51 h1:Av62ac0O9wRbLI6xvtm51BBZnxHyEgLXV/YmiJpdogc= +github.com/thanos-io/promql-engine v0.0.0-20231003153358-8605b6afba51/go.mod h1:vfXJv1JXNdLfHnjsHsLLJl5tyI7KblF76Wo5lZ9YC4Q= +github.com/thanos-io/thanos v0.32.5-0.20231010190130-dfe0bbff507b h1:7eH6FRIQ/d0wlklAHe8dFpMAxG81C6uE7LTEj5jafss= +github.com/thanos-io/thanos v0.32.5-0.20231010190130-dfe0bbff507b/go.mod h1:tqT2FQHiOF16empgE3vvZrA++fN9Cx0lwmxlMmBaVzA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1245,6 +1245,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39 h1:D3ltj0b2c2FgUacKrB1pWGgwrUyCESY9W8XYYQ5sqY8= +github.com/zhangyunhao116/umap v0.0.0-20221211160557-cb7705fafa39/go.mod h1:r86X1CnsDRrOeLtJlqRWdELPWpkcf933GTlojQlifQw= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= @@ -1360,8 +1362,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI= +golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1998,12 
+2000,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108= +k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= +k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 2afbd8d5ee..36ae05bda9 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1467,7 +1467,7 @@ func createTSDBBlock(t *testing.T, bkt objstore.Bucket, userID string, minT, max require.NoError(t, err) } - require.NoError(t, db.Compact()) + require.NoError(t, db.Compact(context.Background())) require.NoError(t, db.Snapshot(snapshotDir, true)) // Look for the created block (we expect one). 
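The compactor test hunk above is the mechanical fallout of a Prometheus API change picked up by this upgrade: on current Prometheus main, (*tsdb.DB).Compact takes a context.Context so a long-running compaction can be cancelled. A minimal, self-contained sketch of the new call shape, assuming an arbitrary throwaway directory (the path and nil logger/registerer/stats are illustrative choices, not part of this patch):

package main

import (
	"context"
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// Open a throwaway TSDB; nil logger, registerer, and stats keep the sketch minimal.
	db, err := tsdb.Open("/tmp/tsdb-compact-demo", nil, nil, tsdb.DefaultOptions(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Previously: db.Compact(). The call is now context-aware, which is why
	// every Cortex call site in this patch gains a ctx argument.
	if err := db.Compact(context.Background()); err != nil {
		log.Fatal(err)
	}
}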
diff --git a/pkg/cortex/modules_test.go b/pkg/cortex/modules_test.go index 2eafc6ac60..7316e07274 100644 --- a/pkg/cortex/modules_test.go +++ b/pkg/cortex/modules_test.go @@ -166,7 +166,7 @@ func (p *myPusher) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte type myQueryable struct{} -func (q *myQueryable) Querier(ctx context.Context, mint, maxt int64) (prom_storage.Querier, error) { +func (q *myQueryable) Querier(mint, maxt int64) (prom_storage.Querier, error) { return prom_storage.NoopQuerier(), nil } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index f3bad33946..96d8df52f7 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -280,12 +280,12 @@ func (u *userTSDB) Appender(ctx context.Context) storage.Appender { return u.db.Appender(ctx) } -func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return u.db.Querier(ctx, mint, maxt) +func (u *userTSDB) Querier(mint, maxt int64) (storage.Querier, error) { + return u.db.Querier(mint, maxt) } -func (u *userTSDB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return u.db.ChunkQuerier(ctx, mint, maxt) +func (u *userTSDB) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { + return u.db.ChunkQuerier(mint, maxt) } func (u *userTSDB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { @@ -304,8 +304,8 @@ func (u *userTSDB) Close() error { return u.db.Close() } -func (u *userTSDB) Compact() error { - return u.db.Compact() +func (u *userTSDB) Compact(ctx context.Context) error { + return u.db.Compact(ctx) } func (u *userTSDB) StartTime() (int64, error) { @@ -1273,14 +1273,14 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client return &client.QueryResponse{}, nil } - q, err := db.Querier(ctx, int64(from), int64(through)) + q, err := db.Querier(int64(from), int64(through)) if err != nil { return nil, err } defer q.Close() // It's not required to return sorted series because series are sorted by the Cortex querier. - ss := q.Select(false, nil, matchers...) + ss := q.Select(ctx, false, nil, matchers...) if ss.Err() != nil { return nil, ss.Err() } @@ -1429,7 +1429,7 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu return nil, cleanup, err } - q, err := db.Querier(ctx, mint, maxt) + q, err := db.Querier(mint, maxt) if err != nil { return nil, cleanup, err } @@ -1438,7 +1438,7 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu q.Close() } - vals, _, err := q.LabelValues(labelName, matchers...) + vals, _, err := q.LabelValues(ctx, labelName, matchers...) 
if err != nil { return nil, cleanup, err } @@ -1505,7 +1505,7 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR return nil, cleanup, err } - q, err := db.Querier(ctx, mint, maxt) + q, err := db.Querier(mint, maxt) if err != nil { return nil, cleanup, err } @@ -1514,7 +1514,7 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR q.Close() } - names, _, err := q.LabelNames() + names, _, err := q.LabelNames(ctx) if err != nil { return nil, cleanup, err } @@ -1585,7 +1585,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien return nil, cleanup, err } - q, err := db.Querier(ctx, mint, maxt) + q, err := db.Querier(mint, maxt) if err != nil { return nil, cleanup, err } @@ -1612,12 +1612,12 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien return nil, cleanup, ctx.Err() } - seriesSet := q.Select(true, hints, matchers...) + seriesSet := q.Select(ctx, true, hints, matchers...) sets = append(sets, seriesSet) } mergedSet = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } else { - mergedSet = q.Select(false, hints, matchersSet[0]...) + mergedSet = q.Select(ctx, false, hints, matchersSet[0]...) } // Generate the response merging all series sets. @@ -1783,14 +1783,14 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ // queryStreamChunks streams metrics from a TSDB. This implements the client.IngesterServer interface func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, sm *storepb.ShardMatcher, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { - q, err := db.ChunkQuerier(ctx, from, through) + q, err := db.ChunkQuerier(from, through) if err != nil { return 0, 0, err } defer q.Close() // It's not required to return sorted series because series are sorted by the Cortex querier. - ss := q.Select(false, nil, matchers...) + ss := q.Select(ctx, false, nil, matchers...) if ss.Err() != nil { return 0, 0, ss.Err() } @@ -2002,7 +2002,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { // this will actually create the blocks. If there is no data (empty TSDB), this is a no-op, although // local blocks compaction may still take place if configured. level.Info(userLogger).Log("msg", "Running compaction after WAL replay") - err = db.Compact() + err = db.Compact(context.TODO()) if err != nil { return nil, errors.Wrapf(err, "failed to compact TSDB: %s", udir) } @@ -2400,7 +2400,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool, allowed *util. default: reason = "regular" - err = userDB.Compact() + err = userDB.Compact(ctx) } if err != nil { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index fce1c60b1c..132003f4ff 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -3776,7 +3776,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { db := i.getTSDB(userID) require.NotNil(t, db) - require.Nil(t, db.Compact()) + require.Nil(t, db.Compact(ctx)) oldBlocks := db.Blocks() require.Equal(t, 3, len(oldBlocks)) @@ -3800,7 +3800,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { _, err := i.Push(ctx, req) require.NoError(t, err) } - require.Nil(t, db.Compact()) + require.Nil(t, db.Compact(ctx)) // Only the second block should be gone along with a new block. 
newBlocks := db.Blocks() @@ -3828,7 +3828,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { _, err := i.Push(ctx, req) require.NoError(t, err) } - require.Nil(t, db.Compact()) + require.Nil(t, db.Compact(ctx)) // All blocks from the old blocks should be gone now. newBlocks2 := db.Blocks() diff --git a/pkg/querier/block.go b/pkg/querier/block.go index c0ce54f4b7..936879dac9 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -4,15 +4,16 @@ import ( "math" "sort" - "github.com/cortexproject/cortex/pkg/querier/iterators" - "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" + + "github.com/cortexproject/cortex/pkg/querier/iterators" + "github.com/cortexproject/cortex/pkg/querier/series" ) func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMatcher { @@ -42,7 +43,7 @@ func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMa // Implementation of storage.SeriesSet, based on individual responses from store client. type blockQuerierSeriesSet struct { series []*storepb.Series - warnings storage.Warnings + warnings annotations.Annotations // next response to process next int @@ -82,7 +83,7 @@ func (bqss *blockQuerierSeriesSet) Err() error { return nil } -func (bqss *blockQuerierSeriesSet) Warnings() storage.Warnings { +func (bqss *blockQuerierSeriesSet) Warnings() annotations.Annotations { return bqss.warnings } diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index f7dac096c7..2ab1698e43 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/pool" @@ -283,21 +284,14 @@ func (q *BlocksStoreQueryable) stopping(_ error) error { } // Querier returns a new Querier on the storage. -func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (q *BlocksStoreQueryable) Querier(mint, maxt int64) (storage.Querier, error) { if s := q.State(); s != services.Running { return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) } - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - return &blocksStoreQuerier{ - ctx: ctx, minT: mint, maxT: maxt, - userID: userID, finder: q.finder, stores: q.stores, metrics: q.metrics, @@ -309,9 +303,7 @@ func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (s } type blocksStoreQuerier struct { - ctx context.Context minT, maxT int64 - userID string finder BlocksFinder stores BlocksStoreSet metrics *blocksStoreQueryableMetrics @@ -326,12 +318,17 @@ type blocksStoreQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. -func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - return q.selectSorted(sp, matchers...) 
+func (q *blocksStoreQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + return q.selectSorted(ctx, sp, matchers...) } -func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelNames") +func (q *blocksStoreQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, nil, err + } + + spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.LabelNames") defer spanLog.Span.Finish() minT, maxT := q.minT, q.maxT @@ -339,61 +336,64 @@ func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, var ( resMtx sync.Mutex resNameSets = [][]string{} - resWarnings = storage.Warnings(nil) + resWarnings = annotations.Annotations(nil) convertedMatchers = convertMatchersToLabelMatcher(matchers) ) queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { - nameSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelNamesFromStore(spanCtx, clients, minT, maxT, convertedMatchers) + nameSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelNamesFromStore(spanCtx, userID, clients, minT, maxT, convertedMatchers) if err != nil { return nil, err, retryableError } resMtx.Lock() resNameSets = append(resNameSets, nameSets...) - resWarnings = append(resWarnings, warnings...) + resWarnings.Merge(warnings) resMtx.Unlock() return queriedBlocks, nil, retryableError } - err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) - if err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { return nil, nil, err } return strutil.MergeSlices(resNameSets...), resWarnings, nil } -func (q *blocksStoreQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelValues") +func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, nil, err + } + + spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.LabelValues") defer spanLog.Span.Finish() minT, maxT := q.minT, q.maxT var ( resValueSets = [][]string{} - resWarnings = storage.Warnings(nil) + resWarnings = annotations.Annotations(nil) resultMtx sync.Mutex ) queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { - valueSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelValuesFromStore(spanCtx, name, clients, minT, maxT, matchers...) + valueSets, warnings, queriedBlocks, err, retryableError := q.fetchLabelValuesFromStore(spanCtx, userID, name, clients, minT, maxT, matchers...) if err != nil { return nil, err, retryableError } resultMtx.Lock() resValueSets = append(resValueSets, valueSets...) - resWarnings = append(resWarnings, warnings...) 
+ resWarnings.Merge(warnings) resultMtx.Unlock() return queriedBlocks, nil, retryableError } - err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) - if err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { return nil, nil, err } @@ -404,8 +404,13 @@ func (q *blocksStoreQuerier) Close() error { return nil } -func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.selectSorted") +func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + userID, err := tenant.TenantID(ctx) + if err != nil { + return storage.ErrSeriesSet(err) + } + + spanLog, spanCtx := spanlogger.New(ctx, "blocksStoreQuerier.selectSorted") defer spanLog.Span.Finish() minT, maxT := q.minT, q.maxT @@ -415,16 +420,16 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* var ( resSeriesSets = []storage.SeriesSet(nil) - resWarnings = storage.Warnings(nil) + resWarnings = annotations.Annotations(nil) - maxChunksLimit = q.limits.MaxChunksPerQueryFromStore(q.userID) + maxChunksLimit = q.limits.MaxChunksPerQueryFromStore(userID) leftChunksLimit = maxChunksLimit resultMtx sync.Mutex ) queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error) { - seriesSets, queriedBlocks, warnings, numChunks, err, retryableError := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, maxChunksLimit, leftChunksLimit) + seriesSets, queriedBlocks, warnings, numChunks, err, retryableError := q.fetchSeriesFromStores(spanCtx, sp, userID, clients, minT, maxT, matchers, maxChunksLimit, leftChunksLimit) if err != nil { return nil, err, retryableError } @@ -432,7 +437,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* resultMtx.Lock() resSeriesSets = append(resSeriesSets, seriesSets...) - resWarnings = append(resWarnings, warnings...) + resWarnings.Merge(warnings) // Given a single block is guaranteed to not be queried twice, we can safely decrease the number of // chunks we can still read before hitting the limit (max == 0 means disabled). @@ -444,8 +449,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* return queriedBlocks, nil, retryableError } - err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) - if err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { return storage.ErrSeriesSet(err) } @@ -458,7 +462,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* resWarnings) } -func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, +func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, userID string, queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This @@ -481,7 +485,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg } // Find the list of blocks we need to query given the time range. 
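// Illustrative sketch, not part of this diff: the Warnings churn throughout this
// file tracks the upstream Prometheus migration from the storage.Warnings slice
// to util/annotations.Annotations, a map keyed by the annotation's message, so
// slice appends become Merge/Add calls and duplicates collapse automatically.
// A minimal sketch of the new accumulation pattern, assuming a hypothetical
// accumulate helper:
//
//	import (
//		"errors"
//
//		"github.com/prometheus/prometheus/util/annotations"
//	)
//
//	// accumulate folds per-store warnings into one set, deduplicated by message.
//	func accumulate(perStore []annotations.Annotations, extra ...string) annotations.Annotations {
//		merged := annotations.Annotations(nil)
//		for _, w := range perStore {
//			merged.Merge(w) // was: merged = append(merged, w...)
//		}
//		for _, msg := range extra {
//			merged.Add(errors.New(msg)) // was: merged = append(merged, errors.New(msg))
//		}
//		return merged
//	}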
- knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, q.userID, minT, maxT) + knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, userID, minT, maxT) if err != nil { return err } @@ -510,7 +514,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ { // Find the set of store-gateway instances having the blocks. The exclude parameter is the // map of blocks queried so far, with the list of store-gateway addresses for each block. - clients, err := q.stores.GetClientsFor(q.userID, remainingBlocks, attemptedBlocks, attemptedBlocksZones) + clients, err := q.stores.GetClientsFor(userID, remainingBlocks, attemptedBlocks, attemptedBlocksZones) if err != nil { // If it's a retry and we get an error, it means there are no more store-gateways left // from which running another attempt, so we're just stopping retrying. @@ -571,19 +575,20 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg func (q *blocksStoreQuerier) fetchSeriesFromStores( ctx context.Context, sp *storage.SelectHints, + userID string, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, matchers []*labels.Matcher, maxChunksLimit int, leftChunksLimit int, -) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, int, error, error) { +) ([]storage.SeriesSet, []ulid.ULID, annotations.Annotations, int, error, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} seriesSets = []storage.SeriesSet(nil) - warnings = storage.Warnings(nil) + warnings = annotations.Annotations(nil) queriedBlocks = []ulid.ULID(nil) numChunks = atomic.NewInt32(0) spanLog = spanlogger.FromContext(ctx) @@ -635,7 +640,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( } mySeries := []*storepb.Series(nil) - myWarnings := storage.Warnings(nil) + myWarnings := annotations.Annotations(nil) myQueriedBlocks := []ulid.ULID(nil) for { @@ -707,7 +712,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( } if w := resp.GetWarning(); w != "" { - myWarnings = append(myWarnings, errors.New(w)) + myWarnings.Add(errors.New(w)) } if h := resp.GetHints(); h != nil { @@ -781,7 +786,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Store the result. mtx.Lock() seriesSets = append(seriesSets, &blockQuerierSeriesSet{series: mySeries}) - warnings = append(warnings, myWarnings...) + warnings.Merge(myWarnings) queriedBlocks = append(queriedBlocks, myQueriedBlocks...) 
mtx.Unlock() @@ -799,17 +804,18 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( func (q *blocksStoreQuerier) fetchLabelNamesFromStore( ctx context.Context, + userID string, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, matchers []storepb.LabelMatcher, -) ([][]string, storage.Warnings, []ulid.ULID, error, error) { +) ([][]string, annotations.Annotations, []ulid.ULID, error, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} nameSets = [][]string{} - warnings = storage.Warnings(nil) + warnings = annotations.Annotations(nil) queriedBlocks = []ulid.ULID(nil) spanLog = spanlogger.FromContext(ctx) merrMtx = sync.Mutex{} @@ -880,7 +886,7 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( mtx.Lock() nameSets = append(nameSets, namesResp.Names) for _, w := range namesResp.Warnings { - warnings = append(warnings, errors.New(w)) + warnings.Add(errors.New(w)) } queriedBlocks = append(queriedBlocks, myQueriedBlocks...) mtx.Unlock() @@ -899,18 +905,19 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( func (q *blocksStoreQuerier) fetchLabelValuesFromStore( ctx context.Context, + userID string, name string, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, matchers ...*labels.Matcher, -) ([][]string, storage.Warnings, []ulid.ULID, error, error) { +) ([][]string, annotations.Annotations, []ulid.ULID, error, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} valueSets = [][]string{} - warnings = storage.Warnings(nil) + warnings = annotations.Annotations(nil) queriedBlocks = []ulid.ULID(nil) spanLog = spanlogger.FromContext(ctx) merrMtx = sync.Mutex{} @@ -984,7 +991,7 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( mtx.Lock() valueSets = append(valueSets, valuesResp.Values) for _, w := range valuesResp.Warnings { - warnings = append(warnings, errors.New(w)) + warnings.Add(errors.New(w)) } queriedBlocks = append(queriedBlocks, myQueriedBlocks...) 
mtx.Unlock() diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 23114f2663..9b05c4f94b 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -825,17 +825,16 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) + ctx := user.InjectOrgID(context.Background(), "user-1") + ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter) reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) q := &blocksStoreQuerier{ - ctx: ctx, minT: minT, maxT: maxT, - userID: "user-1", finder: finder, stores: stores, consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), @@ -848,7 +847,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, metricName), } - set := q.Select(true, nil, matchers...) + set := q.Select(ctx, true, nil, matchers...) if testData.expectedErr != nil { assert.EqualError(t, set.Err(), testData.expectedErr.Error()) assert.IsType(t, set.Err(), testData.expectedErr) @@ -1344,17 +1343,15 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Splitting it because we need a new registry for names and values. // And also the initial expectedErr checking needs to be done for both. for _, testFunc := range []string{"LabelNames", "LabelValues"} { - ctx := context.Background() + ctx := user.InjectOrgID(context.Background(), "user-1") reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) q := &blocksStoreQuerier{ - ctx: ctx, minT: minT, maxT: maxT, - userID: "user-1", finder: finder, stores: stores, consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), @@ -1364,7 +1361,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { } if testFunc == "LabelNames" { - names, warnings, err := q.LabelNames() + names, warnings, err := q.LabelNames(ctx) if testData.expectedErr != "" { require.Equal(t, testData.expectedErr, err.Error()) continue @@ -1381,7 +1378,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { } if testFunc == "LabelValues" { - values, warnings, err := q.LabelValues(labels.MetricName) + values, warnings, err := q.LabelValues(ctx, labels.MetricName) if testData.expectedErr != "" { require.Equal(t, testData.expectedErr, err.Error()) continue @@ -1447,14 +1444,13 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) t.Run(testName, func(t *testing.T) { t.Parallel() + ctx := user.InjectOrgID(context.Background(), "user-1") finder := &blocksFinderMock{} finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) q := &blocksStoreQuerier{ - ctx: context.Background(), minT: testData.queryMinT, maxT: testData.queryMaxT, - userID: "user-1", finder: finder, stores: &blocksStoreSetMock{}, consistency: 
NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), @@ -1469,7 +1465,7 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) End: testData.queryMaxT, } - set := q.selectSorted(sp) + set := q.selectSorted(ctx, sp) require.NoError(t, set.Err()) if testData.expectedMinT == 0 && testData.expectedMaxT == 0 { diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index ae3468cc65..10e4db4326 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -57,10 +58,9 @@ type distributorQueryable struct { queryStoreForLabels bool } -func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (d distributorQueryable) Querier(mint, maxt int64) (storage.Querier, error) { return &distributorQuerier{ distributor: d.distributor, - ctx: ctx, mint: mint, maxt: maxt, streaming: d.streaming, @@ -78,7 +78,6 @@ func (d distributorQueryable) UseQueryable(now time.Time, _, queryMaxT int64) bo type distributorQuerier struct { distributor Distributor - ctx context.Context mint, maxt int64 streaming bool streamingMetadata bool @@ -89,8 +88,8 @@ type distributorQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. -func (q *distributorQuerier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(q.ctx, "distributorQuerier.Select") +func (q *distributorQuerier) Select(ctx context.Context, sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + log, ctx := spanlogger.New(ctx, "distributorQuerier.Select") defer log.Span.Finish() minT, maxT := q.mint, q.maxt @@ -208,27 +207,27 @@ func (q *distributorQuerier) streamingSelect(ctx context.Context, sortSeries boo return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } -func (q *distributorQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *distributorQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { var ( lvs []string err error ) if q.streamingMetadata { - lvs, err = q.distributor.LabelValuesForLabelNameStream(q.ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) + lvs, err = q.distributor.LabelValuesForLabelNameStream(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) } else { - lvs, err = q.distributor.LabelValuesForLabelName(q.ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) + lvs, err = q.distributor.LabelValuesForLabelName(ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) } return lvs, nil, err } -func (q *distributorQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *distributorQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { if len(matchers) > 0 { - return q.labelNamesWithMatchers(matchers...) 
+ return q.labelNamesWithMatchers(ctx, matchers...) } - log, ctx := spanlogger.New(q.ctx, "distributorQuerier.LabelNames") + log, ctx := spanlogger.New(ctx, "distributorQuerier.LabelNames") defer log.Span.Finish() var ( @@ -246,8 +245,8 @@ func (q *distributorQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, } // labelNamesWithMatchers performs the LabelNames call by calling ingester's MetricsForLabelMatchers method -func (q *distributorQuerier) labelNamesWithMatchers(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - log, ctx := spanlogger.New(q.ctx, "distributorQuerier.labelNamesWithMatchers") +func (q *distributorQuerier) labelNamesWithMatchers(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + log, ctx := spanlogger.New(ctx, "distributorQuerier.labelNamesWithMatchers") defer log.Span.Finish() var ( diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index d3520adaaa..451502167e 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -50,10 +50,11 @@ func TestDistributorQuerier(t *testing.T) { nil) queryable := newDistributorQueryable(d, false, false, nil, 0, false) - querier, err := queryable.Querier(context.Background(), mint, maxt) + querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) - seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}) + ctx := context.Background() + seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}) require.NoError(t, seriesSet.Err()) require.True(t, seriesSet.Next()) @@ -142,7 +143,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) ctx := user.InjectOrgID(context.Background(), "test") queryable := newDistributorQueryable(distributor, streamingEnabled, streamingEnabled, nil, testData.queryIngestersWithin, testData.queryStoreForLabels) - querier, err := queryable.Querier(ctx, testData.queryMinT, testData.queryMaxT) + querier, err := queryable.Querier(testData.queryMinT, testData.queryMaxT) require.NoError(t, err) limits := DefaultLimitsConfig() @@ -161,7 +162,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) } } - seriesSet := querier.Select(true, hints) + seriesSet := querier.Select(ctx, true, hints) require.NoError(t, seriesSet.Err()) if testData.expectedMinT == 0 && testData.expectedMaxT == 0 { @@ -231,10 +232,10 @@ func TestIngesterStreaming(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "0") queryable := newDistributorQueryable(d, true, true, mergeChunks, 0, true) - querier, err := queryable.Querier(ctx, mint, maxt) + querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) - seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}) + seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}) require.NoError(t, seriesSet.Err()) require.True(t, seriesSet.Next()) @@ -309,10 +310,10 @@ func TestIngesterStreamingMixedResults(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "0") queryable := newDistributorQueryable(d, true, true, mergeChunks, 0, true) - querier, err := queryable.Querier(ctx, mint, maxt) + querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) - seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*")) + seriesSet := querier.Select(ctx, true, 
&storage.SelectHints{Start: mint, End: maxt}, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".*")) require.NoError(t, seriesSet.Err()) require.True(t, seriesSet.Next()) @@ -365,10 +366,11 @@ func TestDistributorQuerier_LabelNames(t *testing.T) { Return(metrics, nil) queryable := newDistributorQueryable(d, false, streamingEnabled, nil, 0, true) - querier, err := queryable.Querier(context.Background(), mint, maxt) + querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) - names, warnings, err := querier.LabelNames(someMatchers...) + ctx := context.Background() + names, warnings, err := querier.LabelNames(ctx, someMatchers...) require.NoError(t, err) assert.Empty(t, warnings) assert.Equal(t, labelNames, names) diff --git a/pkg/querier/duplicates_test.go b/pkg/querier/duplicates_test.go index a319c4d657..629b2a4fa0 100644 --- a/pkg/querier/duplicates_test.go +++ b/pkg/querier/duplicates_test.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -115,7 +116,7 @@ type testQueryable struct { ts storage.SeriesSet } -func (t *testQueryable) Querier(_ context.Context, _, _ int64) (storage.Querier, error) { +func (t *testQueryable) Querier(_, _ int64) (storage.Querier, error) { return testQuerier{ts: t.ts}, nil } @@ -123,15 +124,15 @@ type testQuerier struct { ts storage.SeriesSet } -func (m testQuerier) Select(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { +func (m testQuerier) Select(ctx context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { return m.ts } -func (m testQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m testQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (m testQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m testQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } diff --git a/pkg/querier/error_translate_queryable.go b/pkg/querier/error_translate_queryable.go index b08e18554e..ccf0cee8c7 100644 --- a/pkg/querier/error_translate_queryable.go +++ b/pkg/querier/error_translate_queryable.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -97,8 +98,8 @@ type errorTranslateQueryable struct { fn ErrTranslateFn } -func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := e.q.Querier(ctx, mint, maxt) +func (e errorTranslateQueryable) Querier(mint, maxt int64) (storage.Querier, error) { + q, err := e.q.Querier(mint, maxt) return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) } @@ -107,13 +108,13 @@ type errorTranslateSampleAndChunkQueryable struct { fn ErrTranslateFn } -func (e errorTranslateSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := e.q.Querier(ctx, mint, maxt) +func (e errorTranslateSampleAndChunkQueryable) 
Querier(mint, maxt int64) (storage.Querier, error) { + q, err := e.q.Querier(mint, maxt) return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) } -func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - q, err := e.q.ChunkQuerier(ctx, mint, maxt) +func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { + q, err := e.q.ChunkQuerier(mint, maxt) return errorTranslateChunkQuerier{q: q, fn: e.fn}, e.fn(err) } @@ -122,13 +123,13 @@ type errorTranslateQuerier struct { fn ErrTranslateFn } -func (e errorTranslateQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelValues(name, matchers...) +func (e errorTranslateQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + values, warnings, err := e.q.LabelValues(ctx, name, matchers...) return values, warnings, e.fn(err) } -func (e errorTranslateQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelNames(matchers...) +func (e errorTranslateQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + values, warnings, err := e.q.LabelNames(ctx, matchers...) return values, warnings, e.fn(err) } @@ -136,8 +137,8 @@ func (e errorTranslateQuerier) Close() error { return e.fn(e.q.Close()) } -func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - s := e.q.Select(sortSeries, hints, matchers...) +func (e errorTranslateQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + s := e.q.Select(ctx, sortSeries, hints, matchers...) return errorTranslateSeriesSet{s: s, fn: e.fn} } @@ -146,13 +147,13 @@ type errorTranslateChunkQuerier struct { fn ErrTranslateFn } -func (e errorTranslateChunkQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelValues(name, matchers...) +func (e errorTranslateChunkQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + values, warnings, err := e.q.LabelValues(ctx, name, matchers...) return values, warnings, e.fn(err) } -func (e errorTranslateChunkQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelNames(matchers...) +func (e errorTranslateChunkQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + values, warnings, err := e.q.LabelNames(ctx, matchers...) return values, warnings, e.fn(err) } @@ -160,8 +161,8 @@ func (e errorTranslateChunkQuerier) Close() error { return e.fn(e.q.Close()) } -func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { - s := e.q.Select(sortSeries, hints, matchers...) +func (e errorTranslateChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { + s := e.q.Select(ctx, sortSeries, hints, matchers...) 
return errorTranslateChunkSeriesSet{s: s, fn: e.fn} } @@ -182,7 +183,7 @@ func (e errorTranslateSeriesSet) Err() error { return e.fn(e.s.Err()) } -func (e errorTranslateSeriesSet) Warnings() storage.Warnings { +func (e errorTranslateSeriesSet) Warnings() annotations.Annotations { return e.s.Warnings() } @@ -203,6 +204,6 @@ func (e errorTranslateChunkSeriesSet) Err() error { return e.fn(e.s.Err()) } -func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings { +func (e errorTranslateChunkSeriesSet) Warnings() annotations.Annotations { return e.s.Warnings() } diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index b5293cc2a1..43c560d4c1 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/require" "github.com/weaveworks/common/httpgrpc" @@ -176,11 +177,11 @@ type errorTestQueryable struct { err error } -func (t errorTestQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (t errorTestQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { return nil, t.err } -func (t errorTestQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { +func (t errorTestQueryable) Querier(mint, maxt int64) (storage.Querier, error) { if t.q != nil { return t.q, nil } @@ -192,11 +193,11 @@ type errorTestQuerier struct { err error } -func (t errorTestQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (t errorTestQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, t.err } -func (t errorTestQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (t errorTestQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, t.err } @@ -204,7 +205,7 @@ func (t errorTestQuerier) Close() error { return nil } -func (t errorTestQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (t errorTestQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { if t.s != nil { return t.s } @@ -227,6 +228,6 @@ func (t errorTestSeriesSet) Err() error { return t.err } -func (t errorTestSeriesSet) Warnings() storage.Warnings { +func (t errorTestSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/pkg/querier/lazyquery/lazyquery.go b/pkg/querier/lazyquery/lazyquery.go index 361baa0213..93634db957 100644 --- a/pkg/querier/lazyquery/lazyquery.go +++ b/pkg/querier/lazyquery/lazyquery.go @@ -1,8 +1,11 @@ package lazyquery import ( + "context" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" ) // LazyQuerier is a lazy-loaded adapter for a storage.Querier @@ -17,12 +20,12 @@ func NewLazyQuerier(next storage.Querier) storage.Querier { } // Select implements Storage.Querier -func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, 
matchers ...*labels.Matcher) storage.SeriesSet { +func (l LazyQuerier) Select(ctx context.Context, selectSorted bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { // make sure there is space in the buffer, to unblock the goroutine and let it die even if nobody is // waiting for the result yet (or anymore). future := make(chan storage.SeriesSet, 1) go func() { - future <- l.next.Select(selectSorted, params, matchers...) + future <- l.next.Select(ctx, selectSorted, params, matchers...) }() return &lazySeriesSet{ @@ -31,13 +34,13 @@ func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, matc } // LabelValues implements Storage.Querier -func (l LazyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelValues(name, matchers...) +func (l LazyQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return l.next.LabelValues(ctx, name, matchers...) } // LabelNames implements Storage.Querier -func (l LazyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelNames(matchers...) +func (l LazyQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return l.next.LabelNames(ctx, matchers...) } // Close implements Storage.Querier @@ -75,6 +78,6 @@ func (s *lazySeriesSet) Err() error { } // Warnings implements storage.SeriesSet. -func (s *lazySeriesSet) Warnings() storage.Warnings { +func (s *lazySeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index a85fb1d092..1455dbe37e 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/thanos-io/promql-engine/engine" "github.com/thanos-io/promql-engine/logicalplan" @@ -166,8 +167,8 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor queryable := NewQueryable(distributorQueryable, ns, iteratorFunc, cfg, limits, tombstonesLoader) exemplarQueryable := newDistributorExemplarQueryable(distributor) - lazyQueryable := storage.QueryableFunc(func(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { - querier, err := queryable.Querier(ctx, mint, maxt) + lazyQueryable := storage.QueryableFunc(func(mint int64, maxt int64) (storage.Querier, error) { + querier, err := queryable.Querier(mint, maxt) if err != nil { return nil, err } @@ -218,7 +219,7 @@ type sampleAndChunkQueryable struct { storage.Queryable } -func (q *sampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (q *sampleAndChunkQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { return nil, errors.New("ChunkQuerier not implemented") } @@ -243,25 +244,9 @@ type QueryableWithFilter interface { // NewQueryable creates a new Queryable for cortex. 
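NewQueryable, rewritten below, pushes all per-request setup (tenant resolution, limiters, time-range validation) out of construction, because the constructor no longer sees a context. From the caller's side the flow becomes: build the querier from just the time range, then supply a context on each query. A sketch of that consumer-side pattern, with an illustrative matcher (imports as in the surrounding files):

func queryOnce(ctx context.Context, q storage.Queryable, mint, maxt int64) error {
	// Construction takes only the time range now; nothing tenant- or
	// deadline-specific can happen at this point.
	querier, err := q.Querier(mint, maxt)
	if err != nil {
		return err
	}
	defer querier.Close()

	// The context arrives per call, which is where tenancy and limits
	// get resolved under the new API.
	matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "up")
	set := querier.Select(ctx, false, &storage.SelectHints{Start: mint, End: maxt}, matcher)
	for set.Next() {
		_ = set.At()
	}
	return set.Err()
}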
func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, chunkIterFn chunkIteratorFunc, cfg Config, limits *validation.Overrides, tombstonesLoader purger.TombstonesLoader) storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - now := time.Now() - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(limits.MaxFetchedSeriesPerQuery(userID), limits.MaxFetchedChunkBytesPerQuery(userID), limits.MaxChunksPerQuery(userID), limits.MaxFetchedDataBytesPerQuery(userID))) - - mint, maxt, err = validateQueryTimeRange(ctx, userID, mint, maxt, limits, cfg.MaxQueryIntoFuture) - if err == errEmptyTimeRange { - return storage.NoopQuerier(), nil - } else if err != nil { - return nil, err - } - + return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { q := querier{ - ctx: ctx, + now: time.Now(), mint: mint, maxt: maxt, chunkIterFn: chunkIterFn, @@ -269,30 +254,8 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, limits: limits, maxQueryIntoFuture: cfg.MaxQueryIntoFuture, queryStoreForLabels: cfg.QueryStoreForLabels, - } - - dqr, err := distributor.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - q.metadataQuerier = dqr - - if distributor.UseQueryable(now, mint, maxt) { - q.queriers = append(q.queriers, dqr) - } - - for _, s := range stores { - if !s.UseQueryable(now, mint, maxt) { - continue - } - - cqr, err := s.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - q.queriers = append(q.queriers, cqr) + distributor: distributor, + stores: stores, } return q, nil @@ -300,26 +263,68 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, } type querier struct { - // used for labels and metadata queries - metadataQuerier storage.Querier - - // used for selecting series - queriers []storage.Querier - chunkIterFn chunkIteratorFunc - ctx context.Context + now time.Time mint, maxt int64 tombstonesLoader purger.TombstonesLoader limits *validation.Overrides maxQueryIntoFuture time.Duration queryStoreForLabels bool + distributor QueryableWithFilter + stores []QueryableWithFilter +} + +func (q querier) setupFromCtx(ctx context.Context) (context.Context, string, int64, int64, storage.Querier, []storage.Querier, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return ctx, userID, 0, 0, nil, nil, err + } + + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(q.limits.MaxFetchedSeriesPerQuery(userID), q.limits.MaxFetchedChunkBytesPerQuery(userID), q.limits.MaxChunksPerQuery(userID), q.limits.MaxFetchedDataBytesPerQuery(userID))) + + mint, maxt, err := validateQueryTimeRange(ctx, userID, q.mint, q.maxt, q.limits, q.maxQueryIntoFuture) + if err != nil { + return ctx, userID, 0, 0, nil, nil, err + } + + dqr, err := q.distributor.Querier(mint, maxt) + if err != nil { + return ctx, userID, 0, 0, nil, nil, err + } + metadataQuerier := dqr + + queriers := make([]storage.Querier, 0) + if q.distributor.UseQueryable(q.now, mint, maxt) { + queriers = append(queriers, dqr) + } + + for _, s := range q.stores { + if !s.UseQueryable(q.now, mint, maxt) { + continue + } + + cqr, err := s.Querier(mint, maxt) + if err != nil { + return ctx, userID, 0, 0, nil, nil, err + } + + queriers = append(queriers, cqr) + } + return ctx, userID, mint, maxt, metadataQuerier, queriers, nil } // Select implements 
storage.Querier interface. // The bool passed is ignored because the series is always sorted. -func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(q.ctx, "querier.Select") +func (q querier) Select(ctx context.Context, sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + ctx, userID, mint, maxt, metadataQuerier, queriers, err := q.setupFromCtx(ctx) + if err == errEmptyTimeRange { + return storage.EmptySeriesSet() + } else if err != nil { + return storage.ErrSeriesSet(err) + } + + log, ctx := spanlogger.New(ctx, "querier.Select") defer log.Span.Finish() if sp != nil { @@ -327,8 +332,14 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l } if sp == nil { + mint, maxt, err = validateQueryTimeRange(ctx, userID, mint, maxt, q.limits, q.maxQueryIntoFuture) + if err == errEmptyTimeRange { + return storage.EmptySeriesSet() + } else if err != nil { + return storage.ErrSeriesSet(err) + } // if SelectHints is null, rely on minT, maxT of querier to scope in range for Select stmt - sp = &storage.SelectHints{Start: q.mint, End: q.maxt} + sp = &storage.SelectHints{Start: mint, End: maxt} } else if sp.Func == "series" && !q.queryStoreForLabels { // Else if the querier receives a 'series' query, it means only metadata is needed. // Here we expect that metadataQuerier querier will handle that. @@ -337,12 +348,7 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l // In this case, the query time range has already been validated when the querier has been // created. - return q.metadataQuerier.Select(true, sp, matchers...) - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return storage.ErrSeriesSet(err) + return metadataQuerier.Select(ctx, true, sp, matchers...) } // Validate query time range. Even if the time range has already been validated when we created @@ -363,7 +369,7 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l // For series queries without specifying the start time, we prefer to // only query ingesters and not to query maxQueryLength to avoid OOM kill. if sp.Func == "series" && startMs == 0 { - return q.metadataQuerier.Select(true, sp, matchers...) + return metadataQuerier.Select(ctx, true, sp, matchers...) } startTime := model.Time(startMs) @@ -382,8 +388,8 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l return storage.ErrSeriesSet(err) } - if len(q.queriers) == 1 { - seriesSet := q.queriers[0].Select(sortSeries, sp, matchers...) + if len(queriers) == 1 { + seriesSet := queriers[0].Select(ctx, sortSeries, sp, matchers...) if tombstones.Len() != 0 { seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: startTime, End: endTime}) @@ -392,16 +398,16 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l return seriesSet } - sets := make(chan storage.SeriesSet, len(q.queriers)) - for _, querier := range q.queriers { + sets := make(chan storage.SeriesSet, len(queriers)) + for _, querier := range queriers { go func(querier storage.Querier) { // We should always select sorted here as we will need to merge the series - sets <- querier.Select(true, sp, matchers...) + sets <- querier.Select(ctx, true, sp, matchers...) 
}(querier) } var result []storage.SeriesSet - for range q.queriers { + for range queriers { select { case set := <-sets: result = append(result, set) @@ -422,88 +428,99 @@ func (q querier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*l } // LabelValues implements storage.Querier. -func (q querier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q querier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + ctx, _, _, _, metadataQuerier, queriers, err := q.setupFromCtx(ctx) + if err == errEmptyTimeRange { + return nil, nil, nil + } else if err != nil { + return nil, nil, err + } if !q.queryStoreForLabels { - return q.metadataQuerier.LabelValues(name, matchers...) + return metadataQuerier.LabelValues(ctx, name, matchers...) } - if len(q.queriers) == 1 { - return q.queriers[0].LabelValues(name, matchers...) + if len(queriers) == 1 { + return queriers[0].LabelValues(ctx, name, matchers...) } var ( - g, _ = errgroup.WithContext(q.ctx) + g, _ = errgroup.WithContext(ctx) sets = [][]string{} - warnings = storage.Warnings(nil) + warnings = annotations.Annotations(nil) resMtx sync.Mutex ) - for _, querier := range q.queriers { + for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. querier := querier g.Go(func() error { // NB: Values are sorted in Cortex already. - myValues, myWarnings, err := querier.LabelValues(name, matchers...) + myValues, myWarnings, err := querier.LabelValues(ctx, name, matchers...) if err != nil { return err } resMtx.Lock() sets = append(sets, myValues) - warnings = append(warnings, myWarnings...) + warnings.Merge(myWarnings) resMtx.Unlock() return nil }) } - err := g.Wait() - if err != nil { + if err := g.Wait(); err != nil { return nil, nil, err } return strutil.MergeSlices(sets...), warnings, nil } -func (q querier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q querier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + ctx, _, _, _, metadataQuerier, queriers, err := q.setupFromCtx(ctx) + if err == errEmptyTimeRange { + return nil, nil, nil + } else if err != nil { + return nil, nil, err + } + if !q.queryStoreForLabels { - return q.metadataQuerier.LabelNames(matchers...) + return metadataQuerier.LabelNames(ctx, matchers...) } - if len(q.queriers) == 1 { - return q.queriers[0].LabelNames(matchers...) + if len(queriers) == 1 { + return queriers[0].LabelNames(ctx, matchers...) } var ( - g, _ = errgroup.WithContext(q.ctx) + g, _ = errgroup.WithContext(ctx) sets = [][]string{} - warnings = storage.Warnings(nil) + warnings = annotations.Annotations(nil) resMtx sync.Mutex ) - for _, querier := range q.queriers { + for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. querier := querier g.Go(func() error { // NB: Names are sorted in Cortex already. - myNames, myWarnings, err := querier.LabelNames(matchers...) + myNames, myWarnings, err := querier.LabelNames(ctx, matchers...) if err != nil { return err } resMtx.Lock() sets = append(sets, myNames) - warnings = append(warnings, myWarnings...) 
+ warnings.Merge(myWarnings) resMtx.Unlock() return nil }) } - err := g.Wait() - if err != nil { + if err := g.Wait(); err != nil { return nil, nil, err } @@ -578,7 +595,7 @@ func (s *sliceSeriesSet) Err() error { return nil } -func (s *sliceSeriesSet) Warnings() storage.Warnings { +func (s *sliceSeriesSet) Warnings() annotations.Annotations { return nil } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index d2da0cc3fa..9d1882257a 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/util/annotations" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -55,9 +56,9 @@ type wrappedQuerier struct { selectCallsArgs [][]interface{} } -func (q *wrappedQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (q *wrappedQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { q.selectCallsArgs = append(q.selectCallsArgs, []interface{}{sortSeries, hints, matchers}) - return q.Querier.Select(sortSeries, hints, matchers...) + return q.Querier.Select(ctx, sortSeries, hints, matchers...) } type wrappedSampleAndChunkQueryable struct { @@ -65,8 +66,8 @@ type wrappedSampleAndChunkQueryable struct { queriers []*wrappedQuerier } -func (q *wrappedSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - querier, err := q.QueryableWithFilter.Querier(ctx, mint, maxt) +func (q *wrappedSampleAndChunkQueryable) Querier(mint, maxt int64) (storage.Querier, error) { + querier, err := q.QueryableWithFilter.Querier(mint, maxt) wQuerier := &wrappedQuerier{Querier: querier} q.queriers = append(q.queriers, wQuerier) return wQuerier, err @@ -424,7 +425,7 @@ func mockTSDB(t *testing.T, labels []labels.Labels, mint model.Time, samples int require.NoError(t, app.Commit()) - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return tsdb.NewBlockQuerier(head, mint, maxt) }), rSamples } @@ -865,7 +866,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]metric.Metric{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) require.NoError(t, err) // We apply the validation here again since when initializing querier we change the start/end time, @@ -883,7 +884,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { } matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test") - set := q.Select(false, hints, matcher) + set := q.Select(ctx, false, hints, matcher) require.False(t, set.Next()) // Expected to be empty. 
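The LabelValues/LabelNames fan-out rewritten above is the general merging shape under the new API: query every underlying querier concurrently via errgroup, collect results under a mutex, and Merge warnings instead of appending error slices. Condensed into one illustrative helper (using the same strutil and annotations packages as this file):

func fanOutLabelNames(ctx context.Context, queriers []storage.Querier, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	g, ctx := errgroup.WithContext(ctx)
	var (
		mtx      sync.Mutex
		sets     [][]string
		warnings annotations.Annotations
	)
	for _, q := range queriers {
		q := q // capture the loop variable for the goroutine
		g.Go(func() error {
			names, w, err := q.LabelNames(ctx, matchers...)
			if err != nil {
				return err
			}
			mtx.Lock()
			sets = append(sets, names)
			warnings.Merge(w) // Annotations dedupe by message, unlike append
			mtx.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, nil, err
	}
	return strutil.MergeSlices(sets...), warnings, nil
}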
require.NoError(t, set.Err()) @@ -906,10 +907,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { distributor.On("LabelNamesStream", mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) require.NoError(t, err) - _, _, err = q.LabelNames() + _, _, err = q.LabelNames(ctx) require.NoError(t, err) if !testData.expectedSkipped { @@ -934,10 +935,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, matchers).Return([]metric.Metric{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) require.NoError(t, err) - _, _, err = q.LabelNames(matchers...) + _, _, err = q.LabelNames(ctx, matchers...) require.NoError(t, err) if !testData.expectedSkipped { @@ -961,10 +962,10 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { distributor.On("LabelValuesForLabelNameStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{}, nil) queryable, _, _ := New(cfg, overrides, distributor, queryables, purger.NewNoopTombstonesLoader(), nil, log.NewNopLogger()) - q, err := queryable.Querier(ctx, util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) + q, err := queryable.Querier(util.TimeToMillis(testData.queryStartTime), util.TimeToMillis(testData.queryEndTime)) require.NoError(t, err) - _, _, err = q.LabelValues(labels.MetricName) + _, _, err = q.LabelValues(ctx, labels.MetricName) require.NoError(t, err) if !testData.expectedSkipped { @@ -1192,11 +1193,10 @@ func NewMockStoreQueryable(cfg Config, store mockStore) storage.Queryable { } func newMockStoreQueryable(store mockStore, chunkIteratorFunc chunkIteratorFunc) storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &mockStoreQuerier{ store: store, chunkIteratorFunc: chunkIteratorFunc, - ctx: ctx, mint: mint, maxt: maxt, }, nil @@ -1206,14 +1206,13 @@ func newMockStoreQueryable(store mockStore, chunkIteratorFunc chunkIteratorFunc) type mockStoreQuerier struct { store mockStore chunkIteratorFunc chunkIteratorFunc - ctx context.Context mint, maxt int64 } // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. 
-func (q *mockStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - userID, err := tenant.TenantID(q.ctx) +func (q *mockStoreQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + userID, err := tenant.TenantID(ctx) if err != nil { return storage.ErrSeriesSet(err) } @@ -1230,7 +1229,7 @@ func (q *mockStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...* return storage.EmptySeriesSet() } - chunks, err := q.store.Get(q.ctx, userID, model.Time(minT), model.Time(maxT), matchers...) + chunks, err := q.store.Get(ctx, userID, model.Time(minT), model.Time(maxT), matchers...) if err != nil { return storage.ErrSeriesSet(err) } @@ -1238,11 +1237,11 @@ func (q *mockStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...* return partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc) } -func (q *mockStoreQuerier) LabelValues(name string, labels ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *mockStoreQuerier) LabelValues(ctx context.Context, name string, labels ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (q *mockStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (q *mockStoreQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -1426,7 +1425,7 @@ type mockQueryableWithFilter struct { useQueryableCalled bool } -func (m *mockQueryableWithFilter) Querier(_ context.Context, _, _ int64) (storage.Querier, error) { +func (m *mockQueryableWithFilter) Querier(_, _ int64) (storage.Querier, error) { return nil, nil } diff --git a/pkg/querier/remote_read.go b/pkg/querier/remote_read.go index 12a1251b44..aea82b8c54 100644 --- a/pkg/querier/remote_read.go +++ b/pkg/querier/remote_read.go @@ -42,7 +42,7 @@ func RemoteReadHandler(q storage.Queryable, logger log.Logger) http.Handler { return } - querier, err := q.Querier(ctx, int64(from), int64(to)) + querier, err := q.Querier(int64(from), int64(to)) if err != nil { errors <- err return @@ -52,7 +52,7 @@ func RemoteReadHandler(q storage.Queryable, logger log.Logger) http.Handler { Start: int64(from), End: int64(to), } - seriesSet := querier.Select(false, params, matchers...) + seriesSet := querier.Select(ctx, false, params, matchers...) 
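With the ctx struct field gone, mocks and production queriers alike recover the tenant from the call-time context, as the Select above does with tenant.TenantID(ctx). The round trip relies on the user and tenant packages already used throughout this repo; a tiny sketch with an illustrative tenant name:

func tenantRoundTrip() (string, error) {
	// Inject an org ID the way callers do, then resolve it the way
	// queriers do; "team-a" is purely illustrative.
	ctx := user.InjectOrgID(context.Background(), "team-a")
	return tenant.TenantID(ctx) // "team-a", nil
}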
resp.Results[i], err = seriesSetToQueryResponse(seriesSet) errors <- err }(i, qr) diff --git a/pkg/querier/remote_read_test.go b/pkg/querier/remote_read_test.go index c044c86ec4..13e8881a2b 100644 --- a/pkg/querier/remote_read_test.go +++ b/pkg/querier/remote_read_test.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/cortexpb" @@ -24,7 +25,7 @@ import ( func TestRemoteReadHandler(t *testing.T) { t.Parallel() - q := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + q := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return mockQuerier{ matrix: model.Matrix{ { @@ -91,18 +92,18 @@ type mockQuerier struct { matrix model.Matrix } -func (m mockQuerier) Select(sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (m mockQuerier) Select(ctx context.Context, sortSeries bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { if sp == nil { panic(fmt.Errorf("select params must be set")) } return series.MatrixToSeriesSet(sortSeries, m.matrix) } -func (m mockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m mockQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (m mockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m mockQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index d2c9270fc2..90e2b83f62 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -19,14 +19,15 @@ package series import ( "sort" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" - "github.com/cortexproject/cortex/pkg/purger" - "github.com/cortexproject/cortex/pkg/querier/iterators" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" + + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" + "github.com/cortexproject/cortex/pkg/purger" + "github.com/cortexproject/cortex/pkg/querier/iterators" ) // ConcreteSeriesSet implements storage.SeriesSet. @@ -64,7 +65,7 @@ func (c *ConcreteSeriesSet) Err() error { } // Warnings implements storage.SeriesSet. 
-func (c *ConcreteSeriesSet) Warnings() storage.Warnings { +func (c *ConcreteSeriesSet) Warnings() annotations.Annotations { return nil } @@ -232,7 +233,7 @@ func (d DeletedSeriesSet) Err() error { return d.seriesSet.Err() } -func (d DeletedSeriesSet) Warnings() storage.Warnings { +func (d DeletedSeriesSet) Warnings() annotations.Annotations { return nil } @@ -361,10 +362,10 @@ func (emptySeriesIterator) Err() error { type seriesSetWithWarnings struct { wrapped storage.SeriesSet - warnings storage.Warnings + warnings annotations.Annotations } -func NewSeriesSetWithWarnings(wrapped storage.SeriesSet, warnings storage.Warnings) storage.SeriesSet { +func NewSeriesSetWithWarnings(wrapped storage.SeriesSet, warnings annotations.Annotations) storage.SeriesSet { return seriesSetWithWarnings{ wrapped: wrapped, warnings: warnings, @@ -383,6 +384,7 @@ func (s seriesSetWithWarnings) Err() error { return s.wrapped.Err() } -func (s seriesSetWithWarnings) Warnings() storage.Warnings { - return append(s.wrapped.Warnings(), s.warnings...) +func (s seriesSetWithWarnings) Warnings() annotations.Annotations { + w := s.wrapped.Warnings() + return w.Merge(s.warnings) } diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index bb13e7ace4..c765f9f2d3 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/util/annotations" "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/tenant" @@ -48,9 +48,8 @@ func tenantQuerierCallback(queryable storage.Queryable) MergeQuerierCallback { } var queriers = make([]storage.Querier, len(tenantIDs)) - for pos, tenantID := range tenantIDs { + for pos := range tenantIDs { q, err := queryable.Querier( - user.InjectOrgID(ctx, tenantID), mint, maxt, ) @@ -96,25 +95,13 @@ type mergeQueryable struct { // Querier returns a new mergeQuerier, which aggregates results from multiple // underlying queriers into a single result. -func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { - // TODO: it's necessary to think how to override context inside querier - // to mark spans created inside querier as child of a span created inside - // methods of merged querier. - ids, queriers, err := m.callback(ctx, mint, maxt) - if err != nil { - return nil, err - } - - // by pass when only single querier is returned - if m.byPassWithSingleQuerier && len(queriers) == 1 { - return queriers[0], nil - } - +func (m *mergeQueryable) Querier(mint int64, maxt int64) (storage.Querier, error) { return &mergeQuerier{ - ctx: ctx, - idLabelName: m.idLabelName, - queriers: queriers, - ids: ids, + idLabelName: m.idLabelName, + mint: mint, + maxt: maxt, + byPassWithSingleQuerier: m.byPassWithSingleQuerier, + callback: m.callback, }, nil } @@ -125,10 +112,11 @@ func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (s // the previous value is exposed through a new label prefixed with "original_". 
// This behaviour is not implemented recursively type mergeQuerier struct { - ctx context.Context - queriers []storage.Querier idLabelName string - ids []string + mint, maxt int64 + callback MergeQuerierCallback + + byPassWithSingleQuerier bool } // LabelValues returns all potential values for a label name. It is not safe @@ -136,15 +124,24 @@ type mergeQuerier struct { // For the label `idLabelName` it will return all the underlying ids available. // For the label "original_" + `idLabelName it will return all the values // of the underlying queriers for `idLabelName`. -func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - log, _ := spanlogger.New(m.ctx, "mergeQuerier.LabelValues") +func (m *mergeQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + ids, queriers, err := m.callback(ctx, m.mint, m.maxt) + if err != nil { + return nil, nil, err + } + + // bypass when only a single querier is returned + if m.byPassWithSingleQuerier && len(queriers) == 1 { + return queriers[0].LabelValues(ctx, name, matchers...) + } + log, _ := spanlogger.New(ctx, "mergeQuerier.LabelValues") defer log.Span.Finish() - matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) + matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) if name == m.idLabelName { var labelValues = make([]string, 0, len(matchedTenants)) - for _, id := range m.ids { + for _, id := range ids { if _, matched := matchedTenants[id]; matched { labelValues = append(labelValues, id) } @@ -158,23 +155,32 @@ func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([] name = m.idLabelName } - return m.mergeDistinctStringSliceWithTenants(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) { - return q.LabelValues(name, filteredMatchers...) - }, matchedTenants) + return m.mergeDistinctStringSliceWithTenants(ctx, func(ctx context.Context, q storage.Querier) ([]string, annotations.Annotations, error) { + return q.LabelValues(ctx, name, filteredMatchers...) + }, matchedTenants, ids, queriers) } // LabelNames returns all the unique label names present in the underlying // queriers. It also adds the `idLabelName` and if present in the original // results the original `idLabelName`. -func (m *mergeQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - log, _ := spanlogger.New(m.ctx, "mergeQuerier.LabelNames") +func (m *mergeQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + ids, queriers, err := m.callback(ctx, m.mint, m.maxt) + if err != nil { + return nil, nil, err + } + + // bypass when only a single querier is returned + if m.byPassWithSingleQuerier && len(queriers) == 1 { + return queriers[0].LabelNames(ctx, matchers...) + } + log, _ := spanlogger.New(ctx, "mergeQuerier.LabelNames") defer log.Span.Finish() - matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) + matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) - labelNames, warnings, err := m.mergeDistinctStringSliceWithTenants(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) { - return q.LabelNames(filteredMatchers...) 
- }, matchedTenants) + labelNames, warnings, err := m.mergeDistinctStringSliceWithTenants(ctx, func(ctx context.Context, q storage.Querier) ([]string, annotations.Annotations, error) { + return q.LabelNames(ctx, filteredMatchers...) + }, matchedTenants, ids, queriers) if err != nil { return nil, nil, err } @@ -203,13 +209,13 @@ func (m *mergeQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storag return labelNames, warnings, nil } -type stringSliceFunc func(context.Context, storage.Querier) ([]string, storage.Warnings, error) +type stringSliceFunc func(context.Context, storage.Querier) ([]string, annotations.Annotations, error) type stringSliceFuncJob struct { querier storage.Querier id string result []string - warnings storage.Warnings + warnings annotations.Annotations } // mergeDistinctStringSliceWithTenants aggregates stringSliceFunc call @@ -217,10 +223,10 @@ type stringSliceFuncJob struct { // provided, all queriers are used. It removes duplicates and sorts the result. // It doesn't require the output of the stringSliceFunc to be sorted, as results // of LabelValues are not sorted. -func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, tenants map[string]struct{}) ([]string, storage.Warnings, error) { +func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, f stringSliceFunc, tenants map[string]struct{}, ids []string, queriers []storage.Querier) ([]string, annotations.Annotations, error) { var jobs []interface{} - for pos, id := range m.ids { + for pos, id := range ids { if tenants != nil { if _, matched := tenants[id]; !matched { continue @@ -228,11 +234,12 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, te } jobs = append(jobs, &stringSliceFuncJob{ - querier: m.queriers[pos], - id: m.ids[pos], + querier: queriers[pos], + id: ids[pos], }) } + parentCtx := ctx run := func(ctx context.Context, jobIntf interface{}) error { job, ok := jobIntf.(*stringSliceFuncJob) if !ok { @@ -240,7 +247,9 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, te } var err error - job.result, job.warnings, err = f(ctx, job.querier) + // Based on the parent ctx here as we are using a lazy querier. + newCtx := user.InjectOrgID(parentCtx, job.id) + job.result, job.warnings, err = f(newCtx, job.querier) if err != nil { return errors.Wrapf(err, "error querying %s %s", rewriteLabelName(m.idLabelName), job.id) } @@ -248,13 +257,13 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, te return nil } - err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run) + err := concurrency.ForEach(ctx, jobs, maxConcurrency, run) if err != nil { return nil, nil, err } // aggregate warnings and deduplicate string results - var warnings storage.Warnings + var warnings annotations.Annotations resultMap := make(map[string]struct{}) for _, jobIntf := range jobs { job, ok := jobIntf.(*stringSliceFuncJob) @@ -267,7 +276,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, te } for _, w := range job.warnings { - warnings = append(warnings, errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(m.idLabelName), job.id)) + warnings.Add(errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(m.idLabelName), job.id)) } } @@ -281,11 +290,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, te // Close releases the resources of the Querier. 
func (m *mergeQuerier) Close() error { - errs := tsdb_errors.NewMulti() - for pos, id := range m.ids { - errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for %s %s", rewriteLabelName(m.idLabelName), id)) - } - return errs.Err() + return nil } type selectJob struct { @@ -298,32 +303,45 @@ type selectJob struct { // `idLabelName` is matched on, it only considers those queriers // matching. The forwarded labelSelector is not containing those that operate // on `idLabelName`. -func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(m.ctx, "mergeQuerier.Select") +func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + ids, queriers, err := m.callback(ctx, m.mint, m.maxt) + if err != nil { + return storage.ErrSeriesSet(err) + } + + // bypass when only a single querier is returned + if m.byPassWithSingleQuerier && len(queriers) == 1 { + return queriers[0].Select(ctx, sortSeries, hints, matchers...) + } + + log, ctx := spanlogger.New(ctx, "mergeQuerier.Select") defer log.Span.Finish() - matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) + matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) var jobs = make([]interface{}, len(matchedValues)) var seriesSets = make([]storage.SeriesSet, len(matchedValues)) var jobPos int - for labelPos := range m.ids { - if _, matched := matchedValues[m.ids[labelPos]]; !matched { + for labelPos := range ids { + if _, matched := matchedValues[ids[labelPos]]; !matched { continue } jobs[jobPos] = &selectJob{ pos: jobPos, - querier: m.queriers[labelPos], - id: m.ids[labelPos], + querier: queriers[labelPos], + id: ids[labelPos], } jobPos++ } + parentCtx := ctx run := func(ctx context.Context, jobIntf interface{}) error { job, ok := jobIntf.(*selectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) } + // Based on the parent ctx here as we are using a lazy querier. + newCtx := user.InjectOrgID(parentCtx, ids[job.pos]) seriesSets[job.pos] = &addLabelsSeriesSet{ - upstream: job.querier.Select(sortSeries, hints, filteredMatchers...), + upstream: job.querier.Select(newCtx, sortSeries, hints, filteredMatchers...), labels: labels.Labels{ { Name: m.idLabelName, @@ -334,8 +352,7 @@ func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, match return nil } - err := concurrency.ForEach(ctx, jobs, maxConcurrency, run) - if err != nil { + if err := concurrency.ForEach(ctx, jobs, maxConcurrency, run); err != nil { return storage.ErrSeriesSet(err) } @@ -417,9 +434,9 @@ func (m *addLabelsSeriesSet) Err() error { // A collection of warnings for the whole set. // Warnings could be return even iteration has not failed with error. 
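Since the per-tenant queriers are now produced lazily by the callback, each job re-bases on the parent context and injects its own org ID before touching the underlying querier; the real code runs these jobs through concurrency.ForEach, but the essence (sequential here for brevity, names illustrative) is:

func forEachTenant(parentCtx context.Context, ids []string, f func(ctx context.Context, id string) error) error {
	for _, id := range ids {
		// Every tenant gets the parent context with its own org ID injected,
		// so a lazily-created querier resolves the right tenant at call time.
		if err := f(user.InjectOrgID(parentCtx, id), id); err != nil {
			return err
		}
	}
	return nil
}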
-func (m *addLabelsSeriesSet) Warnings() storage.Warnings { +func (m *addLabelsSeriesSet) Warnings() annotations.Annotations { upstream := m.upstream.Warnings() - warnings := make(storage.Warnings, len(upstream)) + warnings := make(annotations.Annotations, len(upstream)) for pos := range upstream { warnings[pos] = errors.Wrapf(upstream[pos], "warning querying %s", labelsToString(m.labels)) } diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index 7beb5a20e6..e8aa04ea28 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/user" @@ -42,36 +43,18 @@ type mockTenantQueryableWithFilter struct { // extraLabels are labels added to all series for all tenants. extraLabels []string // warningsByTenant are warnings that will be returned for queries of that tenant. - warningsByTenant map[string]storage.Warnings + warningsByTenant map[string]annotations.Annotations // queryErrByTenant is an error that will be returne for queries of that tenant. queryErrByTenant map[string]error } // Querier implements the storage.Queryable interface. -func (m *mockTenantQueryableWithFilter) Querier(ctx context.Context, _, _ int64) (storage.Querier, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, err - } - +func (m *mockTenantQueryableWithFilter) Querier(_, _ int64) (storage.Querier, error) { q := mockTenantQuerier{ - tenant: tenantIDs[0], - extraLabels: m.extraLabels, - ctx: ctx, - } - - // set warning if exists - if m.warningsByTenant != nil { - if w, ok := m.warningsByTenant[q.tenant]; ok { - q.warnings = append([]error(nil), w...) - } - } - - // set queryErr if exists - if m.queryErrByTenant != nil { - if err, ok := m.queryErrByTenant[q.tenant]; ok { - q.queryErr = err - } + extraLabels: m.extraLabels, + warnings: annotations.Annotations(nil), + warningsByTenant: m.warningsByTenant, + queryErrByTenant: m.queryErrByTenant, } return q, nil @@ -84,25 +67,28 @@ func (m *mockTenantQueryableWithFilter) UseQueryable(_ time.Time, _, _ int64) bo } type mockTenantQuerier struct { - tenant string extraLabels []string - warnings storage.Warnings + warnings annotations.Annotations queryErr error + + // warningsByTenant are warnings that will be returned for queries of that tenant. + warningsByTenant map[string]annotations.Annotations + // queryErrByTenant is an error that will be returned for queries of that tenant. + queryErrByTenant map[string]error } -func (m mockTenantQuerier) matrix() model.Matrix { +func (m mockTenantQuerier) matrix(tenant string) model.Matrix { matrix := model.Matrix{ &model.SampleStream{ Metric: model.Metric{ - "instance": "host1", - "tenant-" + model.LabelName(m.tenant): "static", + "instance": "host1", + "tenant-" + model.LabelName(tenant): "static", }, }, &model.SampleStream{ Metric: model.Metric{ - "instance": "host2." + model.LabelValue(m.tenant), + "instance": "host2."
+ model.LabelValue(tenant), }, }, } @@ -134,7 +120,7 @@ func metricMatches(m model.Metric, selector labels.Selector) bool { type mockSeriesSet struct { upstream storage.SeriesSet - warnings storage.Warnings + warnings annotations.Annotations queryErr error } @@ -155,17 +141,36 @@ func (m *mockSeriesSet) Err() error { // Warnings implements the storage.SeriesSet interface. It returns a collection of warnings for the whole set. // Warnings could be returned even if iteration has not failed with error. -func (m *mockSeriesSet) Warnings() storage.Warnings { +func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings } // Select implements the storage.Querier interface. -func (m mockTenantQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, _ := spanlogger.New(m.ctx, "mockTenantQuerier.select") +func (m mockTenantQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return storage.ErrSeriesSet(err) + } + + // set warning if exists + if m.warningsByTenant != nil { + if w, ok := m.warningsByTenant[tenantIDs[0]]; ok { + m.warnings.Merge(w) + } + } + + // set queryErr if exists + if m.queryErrByTenant != nil { + if err, ok := m.queryErrByTenant[tenantIDs[0]]; ok { + m.queryErr = err + } + } + + log, _ := spanlogger.New(ctx, "mockTenantQuerier.select") defer log.Span.Finish() var matrix model.Matrix - for _, s := range m.matrix() { + for _, s := range m.matrix(tenantIDs[0]) { if metricMatches(s.Metric, matchers) { matrix = append(matrix, s) } @@ -180,9 +185,28 @@ func (m mockTenantQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...* // LabelValues implements the storage.LabelQuerier interface. // The mockTenantQuerier returns all a sorted slice of all label values and does not support reducing the result set with matchers. -func (m mockTenantQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m mockTenantQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, nil, err + } + + // set warning if exists + if m.warningsByTenant != nil { + if w, ok := m.warningsByTenant[tenantIDs[0]]; ok { + m.warnings.Merge(w) + } + } + + // set queryErr if exists + if m.queryErrByTenant != nil { + if err, ok := m.queryErrByTenant[tenantIDs[0]]; ok { + m.queryErr = err + } + } + if len(matchers) > 0 { - m.warnings = append(m.warnings, errors.New(mockMatchersNotImplemented)) + m.warnings.Add(errors.New(mockMatchersNotImplemented)) } if m.queryErr != nil { @@ -190,7 +214,7 @@ func (m mockTenantQuerier) LabelValues(name string, matchers ...*labels.Matcher) } labelValues := make(map[string]struct{}) - for _, s := range m.matrix() { + for _, s := range m.matrix(tenantIDs[0]) { for k, v := range s.Metric { if k == model.LabelName(name) { labelValues[string(v)] = struct{}{} @@ -209,7 +233,25 @@ func (m mockTenantQuerier) LabelValues(name string, matchers ...*labels.Matcher) // It returns a sorted slice of all label names in the querier. // If only one matcher is provided with label Name=seriesWithLabelNames then the resulting set will have the values of that matchers pipe-split appended. // I.e. 
querying for {seriesWithLabelNames="foo|bar|baz"} will have as result [bar, baz, foo, ] -func (m mockTenantQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (m mockTenantQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, nil, err + } + // set warning if exists + if m.warningsByTenant != nil { + if w, ok := m.warningsByTenant[tenantIDs[0]]; ok { + m.warnings.Merge(w) + } + } + + // set queryErr if exists + if m.queryErrByTenant != nil { + if err, ok := m.queryErrByTenant[tenantIDs[0]]; ok { + m.queryErr = err + } + } + var results []string if len(matchers) == 1 && matchers[0].Name == seriesWithLabelNames { @@ -218,7 +260,7 @@ func (m mockTenantQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, st } results = strings.Split(matchers[0].Value, "|") } else if len(matchers) > 1 { - m.warnings = append(m.warnings, errors.New(mockMatchersNotImplemented)) + m.warnings.Add(errors.New(mockMatchersNotImplemented)) } if m.queryErr != nil { @@ -226,7 +268,7 @@ func (m mockTenantQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, st } labelValues := make(map[string]struct{}) - for _, s := range m.matrix() { + for _, s := range m.matrix(tenantIDs[0]) { for k := range s.Metric { labelValues[string(k)] = struct{}{} } @@ -259,14 +301,8 @@ func (s *mergeQueryableScenario) init() (storage.Querier, error) { // initialize with default tenant label q := NewQueryable(&s.queryable, !s.doNotByPassSingleQuerier) - // inject tenants into context - ctx := context.Background() - if len(s.tenants) > 0 { - ctx = user.InjectOrgID(ctx, strings.Join(s.tenants, "|")) - } - // retrieve querier - return q.Querier(ctx, mint, maxt) + return q.Querier(mint, maxt) } // selectTestCase is the inputs and expected outputs of a call to Select. @@ -279,7 +315,7 @@ type selectTestCase struct { expectedSeriesCount int // expectedLabels is the expected label sets returned by a Select filtered by the Matchers in selector. expectedLabels []labels.Labels - // expectedWarnings is a slice of storage.Warnings messages expected when querying. + // expectedWarnings is a slice of annotations.Annotations messages expected when querying. expectedWarnings []string // expectedQueryErr is the error expected when querying. expectedQueryErr error @@ -299,7 +335,7 @@ type labelNamesTestCase struct { matchers []*labels.Matcher // expectedLabelNames are the expected label names returned from the queryable. expectedLabelNames []string - // expectedWarnings is a slice of storage.Warnings messages expected when querying. + // expectedWarnings is a slice of annotations.Annotations messages expected when querying. expectedWarnings []string // expectedQueryErr is the error expected when querying. expectedQueryErr error @@ -321,7 +357,7 @@ type labelValuesTestCase struct { matchers []*labels.Matcher // expectedLabelValues are the expected label values returned from the queryable. expectedLabelValues []string - // expectedWarnings is a slice of storage.Warnings messages expected when querying. + // expectedWarnings is a slice of annotations.Annotations messages expected when querying. expectedWarnings []string // expectedQueryErr is the error expected when querying. 
expectedQueryErr error @@ -338,10 +374,11 @@ func TestMergeQueryable_Querier(t *testing.T) { t.Parallel() queryable := &mockTenantQueryableWithFilter{} q := NewQueryable(queryable, false /* byPassWithSingleQuerier */) - // Create a context with no tenant specified. - ctx := context.Background() - _, err := q.Querier(ctx, mint, maxt) + querier, err := q.Querier(mint, maxt) + require.NoError(t, err) + + _, _, err = querier.LabelValues(context.Background(), "test") require.EqualError(t, err, user.ErrNoOrgID.Error()) }) } @@ -375,9 +412,9 @@ var ( name: "three tenants, two with warnings", tenants: []string{"team-a", "team-b", "team-c"}, queryable: mockTenantQueryableWithFilter{ - warningsByTenant: map[string]storage.Warnings{ - "team-b": storage.Warnings([]error{errors.New("don't like them")}), - "team-c": storage.Warnings([]error{errors.New("out of office")}), + warningsByTenant: map[string]annotations.Annotations{ + "team-b": annotations.New().Add(errors.New("don't like them")), + "team-c": annotations.New().Add(errors.New("out of office")), }, }, } @@ -522,11 +559,17 @@ func TestMergeQueryable_Select(t *testing.T) { querier, err := scenario.init() require.NoError(t, err) + // inject tenants into context + ctx := context.Background() + if len(scenario.tenants) > 0 { + ctx = user.InjectOrgID(ctx, strings.Join(scenario.tenants, "|")) + } + for _, tc := range scenario.selectTestCases { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, End: maxt}, tc.matchers...) + seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}, tc.matchers...) if tc.expectedQueryErr != nil { require.EqualError(t, seriesSet.Err(), tc.expectedQueryErr.Error()) @@ -675,9 +718,15 @@ func TestMergeQueryable_LabelNames(t *testing.T) { querier, err := scenario.init() require.NoError(t, err) + // inject tenants into context + ctx := context.Background() + if len(scenario.tenants) > 0 { + ctx = user.InjectOrgID(ctx, strings.Join(scenario.tenants, "|")) + } + t.Run(scenario.labelNamesTestCase.name, func(t *testing.T) { t.Parallel() - labelNames, warnings, err := querier.LabelNames(scenario.labelNamesTestCase.matchers...) + labelNames, warnings, err := querier.LabelNames(ctx, scenario.labelNamesTestCase.matchers...) if scenario.labelNamesTestCase.expectedQueryErr != nil { require.EqualError(t, err, scenario.labelNamesTestCase.expectedQueryErr.Error()) } else { @@ -854,11 +903,17 @@ func TestMergeQueryable_LabelValues(t *testing.T) { querier, err := scenario.init() require.NoError(t, err) + // inject tenants into context + ctx := context.Background() + if len(scenario.tenants) > 0 { + ctx = user.InjectOrgID(ctx, strings.Join(scenario.tenants, "|")) + } + for _, tc := range scenario.labelValuesTestCases { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - actLabelValues, warnings, err := querier.LabelValues(tc.labelName, tc.matchers...) + actLabelValues, warnings, err := querier.LabelValues(ctx, tc.labelName, tc.matchers...) if tc.expectedQueryErr != nil { require.EqualError(t, err, tc.expectedQueryErr.Error()) } else { @@ -873,13 +928,14 @@ func TestMergeQueryable_LabelValues(t *testing.T) { } // assertEqualWarnings asserts that all the expected warning messages are present. 
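assertEqualWarnings below converts through AsErrors because annotations.Annotations is a map keyed by error message, not a slice. That also means Add and Merge deduplicate; a standalone sketch of the behavior (not part of the patch):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	var warnings annotations.Annotations // the zero value is a nil map; Add initializes it lazily

	warnings.Add(errors.New("chunk truncated"))
	warnings.Add(errors.New("chunk truncated")) // same message: still one entry

	extra := annotations.New().Add(errors.New("stale marker dropped"))
	warnings.Merge(extra)

	// AsErrors converts back to the []error shape tests and callers expect.
	for _, err := range warnings.AsErrors() {
		fmt.Println(err)
	}
	// Prints two distinct warnings, not three.
}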
-func assertEqualWarnings(t *testing.T, exp []string, act storage.Warnings) { +func assertEqualWarnings(t *testing.T, exp []string, act annotations.Annotations) { if len(exp) == 0 && len(act) == 0 { return } var actStrings = make([]string, len(act)) - for pos := range act { - actStrings[pos] = act[pos].Error() + warnings := act.AsErrors() + for pos := range warnings { + actStrings[pos] = warnings[pos].Error() } assert.ElementsMatch(t, exp, actStrings) } @@ -932,10 +988,10 @@ func TestTracingMergeQueryable(t *testing.T) { filter := mockTenantQueryableWithFilter{} q := NewQueryable(&filter, false) // retrieve querier if set - querier, err := q.Querier(ctx, mint, maxt) + querier, err := q.Querier(mint, maxt) require.NoError(t, err) - seriesSet := querier.Select(true, &storage.SelectHints{Start: mint, + seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}) require.NoError(t, seriesSet.Err()) diff --git a/pkg/querier/timeseries_series_set.go b/pkg/querier/timeseries_series_set.go index 3001ea2096..4c7af48377 100644 --- a/pkg/querier/timeseries_series_set.go +++ b/pkg/querier/timeseries_series_set.go @@ -3,12 +3,13 @@ package querier import ( "sort" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/querier/iterators" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" + + "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/querier/iterators" ) // timeSeriesSeriesSet is a wrapper around a cortexpb.TimeSeries slice to implement to SeriesSet interface @@ -43,7 +44,7 @@ func (t *timeSeriesSeriesSet) At() storage.Series { func (t *timeSeriesSeriesSet) Err() error { return nil } // Warnings implements storage.SeriesSet interface. 
-func (t *timeSeriesSeriesSet) Warnings() storage.Warnings { return nil } +func (t *timeSeriesSeriesSet) Warnings() annotations.Annotations { return nil } // timeseries is a type wrapper that implements the storage.Series interface type timeseries struct { diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 32734cf751..92f92094c9 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -3,7 +3,7 @@ package ruler import ( "context" "fmt" - io "io" + "io" "math/rand" "net/http" "net/http/httptest" @@ -31,6 +31,7 @@ import ( "github.com/prometheus/prometheus/promql" promRules "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -110,7 +111,7 @@ func (r ruleLimits) DisabledRuleGroups(userID string) validation.DisabledRuleGro } func newEmptyQueryable() storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return emptyQuerier{}, nil }) } @@ -118,11 +119,11 @@ func newEmptyQueryable() storage.Queryable { type emptyQuerier struct { } -func (e emptyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (e emptyQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (e emptyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { +func (e emptyQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -130,7 +131,7 @@ func (e emptyQuerier) Close() error { return nil } -func (e emptyQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (e emptyQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { return storage.EmptySeriesSet() } @@ -141,12 +142,12 @@ func testQueryableFunc(querierTestConfig *querier.TestConfig, reg prometheus.Reg overrides, _ := validation.NewOverrides(querier.DefaultLimitsConfig(), nil) q, _, _ := querier.New(querierTestConfig.Cfg, overrides, querierTestConfig.Distributor, querierTestConfig.Stores, purger.NewNoopTombstonesLoader(), reg, logger) - return func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return q.Querier(ctx, mint, maxt) + return func(mint, maxt int64) (storage.Querier, error) { + return q.Querier(mint, maxt) } } - return func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return func(mint, maxt int64) (storage.Querier, error) { return storage.NoopQuerier(), nil } } diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index 796f5a291a..46947ebc95 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -4,6 +4,7 @@ import ( "flag" "fmt" "strings" + "time" "github.com/alecthomas/units" "github.com/go-kit/log" @@ -31,6 +32,8 @@ const ( IndexCacheBackendDefault = IndexCacheBackendInMemory defaultMaxItemSize = model.Bytes(128 * units.MiB) + + defaultTTL = 24 * time.Hour ) var ( @@ -178,7 +181,8 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu if err != nil { return nil, 
err } - cache, err := storecache.NewRemoteIndexCache(logger, c, nil, iReg) + // TODO(yeya24): expose TTL + cache, err := storecache.NewRemoteIndexCache(logger, c, nil, iReg, defaultTTL) if err != nil { return nil, err } @@ -189,7 +193,8 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu if err != nil { return nil, err } - cache, err := storecache.NewRemoteIndexCache(logger, c, nil, iReg) + // TODO(yeya24): expose TTL + cache, err := storecache.NewRemoteIndexCache(logger, c, nil, iReg, defaultTTL) if err != nil { return nil, err } diff --git a/pkg/storegateway/bucket_store_inmemory_server.go b/pkg/storegateway/bucket_store_inmemory_server.go index 195e191efa..d3902d5b23 100644 --- a/pkg/storegateway/bucket_store_inmemory_server.go +++ b/pkg/storegateway/bucket_store_inmemory_server.go @@ -5,7 +5,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" - "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/storepb" ) @@ -20,7 +20,7 @@ type bucketStoreSeriesServer struct { ctx context.Context SeriesSet []*storepb.Series - Warnings storage.Warnings + Warnings annotations.Annotations Hints hintspb.SeriesResponseHints } @@ -30,7 +30,7 @@ func newBucketStoreSeriesServer(ctx context.Context) *bucketStoreSeriesServer { func (s *bucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { if r.GetWarning() != "" { - s.Warnings = append(s.Warnings, errors.New(r.GetWarning())) + s.Warnings.Add(errors.New(r.GetWarning())) } if rawHints := r.GetHints(); rawHints != nil { diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index e5630a2c1a..228099e1af 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -588,6 +588,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro return u.cfg.BucketStore.EstimatedMaxSeriesSizeBytes }), store.WithLazyExpandedPostings(u.cfg.BucketStore.LazyExpandedPostingsEnabled), + store.WithDontResort(true), // Cortex doesn't need to resort series in store gateway. 
} if u.logLevel.String() == "debug" { bucketStoreOpts = append(bucketStoreOpts, store.WithDebugLogging()) diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 1b9b488768..9d57d52d42 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -21,8 +21,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -37,13 +37,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" + cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -594,7 +592,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st require.NoError(t, db.Snapshot(userDir, true)) } -func querySeries(stores *BucketStores, userID, metricName string, minT, maxT int64) ([]*storepb.Series, storage.Warnings, error) { +func querySeries(stores *BucketStores, userID, metricName string, minT, maxT int64) ([]*storepb.Series, annotations.Annotations, error) { req := &storepb.SeriesRequest{ MinTime: minT, MaxTime: maxT, diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index f186063d63..b499f72aa3 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1208,14 +1208,15 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int db.DisableCompactions() step := (maxT - minT) / int64(numSeries) + ctx := context.Background() addSample := func(i int) { lbls := labels.Labels{labels.Label{Name: "series_id", Value: strconv.Itoa(i)}} - app := db.Appender(context.Background()) + app := db.Appender(ctx) _, err := app.Append(0, lbls, minT+(step*int64(i)), float64(i)) require.NoError(t, err) require.NoError(t, app.Commit()) - require.NoError(t, db.Compact()) + require.NoError(t, db.Compact(ctx)) } if numBlocks > 0 { i := 0 diff --git a/pkg/util/concurrency/runner.go b/pkg/util/concurrency/runner.go index 5f5078cc86..5151093b93 100644 --- a/pkg/util/concurrency/runner.go +++ b/pkg/util/concurrency/runner.go @@ -96,7 +96,7 @@ func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc f return g.Wait() } -// CreateJobsFromStrings is an utility to create jobs from an slice of strings. +// CreateJobsFromStrings is a utility to create jobs from a slice of strings.
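Several of the hunks above track the same upstream interface change: `storage.Queryable.Querier` no longer takes a `context.Context`, and the per-call methods (`Select`, `LabelNames`, `LabelValues`) now do. A minimal sketch of the new calling convention, wrapped in a hypothetical helper around any `storage.Queryable`:

```go
package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// countSeries shows the post-migration convention: the context has moved off
// Querier() and onto each per-call method such as Select.
func countSeries(ctx context.Context, q storage.Queryable, mint, maxt int64, matchers ...*labels.Matcher) (int, error) {
	querier, err := q.Querier(mint, maxt) // was: q.Querier(ctx, mint, maxt)
	if err != nil {
		return 0, err
	}
	defer querier.Close()

	hints := &storage.SelectHints{Start: mint, End: maxt}
	set := querier.Select(ctx, true, hints, matchers...) // ctx now passed per call
	n := 0
	for set.Next() {
		n++
	}
	return n, set.Err()
}
```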
func CreateJobsFromStrings(values []string) []interface{} { jobs := make([]interface{}, len(values)) for i := 0; i < len(values); i++ { diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 93087cd31c..48d2e66ee9 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -272,6 +272,13 @@ type ACLAuthMethod struct { Partition string `json:",omitempty"` } +type ACLTokenFilterOptions struct { + AuthMethod string `json:",omitempty"` + Policy string `json:",omitempty"` + Role string `json:",omitempty"` + ServiceName string `json:",omitempty"` +} + func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) { type Alias ACLAuthMethod exported := &struct { @@ -895,6 +902,44 @@ func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, erro return entries, qm, nil } +// TokenListFiltered lists all tokens that match the given filter options. +// The listing does not contain any SecretIDs as those may only be retrieved by a call to TokenRead. +func (a *ACL) TokenListFiltered(t ACLTokenFilterOptions, q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/tokens") + r.setQueryOptions(q) + + if t.AuthMethod != "" { + r.params.Set("authmethod", t.AuthMethod) + } + if t.Policy != "" { + r.params.Set("policy", t.Policy) + } + if t.Role != "" { + r.params.Set("role", t.Role) + } + if t.ServiceName != "" { + r.params.Set("servicename", t.ServiceName) + } + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLTokenListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + // PolicyCreate will create a new policy. It is not allowed for the policy parameters // ID field to be set as this will be generated by Consul while processing the request. func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index f45929cb5b..6775edf425 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -274,6 +274,8 @@ type MembersOpts struct { // Segment is the LAN segment to show members for. Setting this to the // AllSegments value above will show members in all segments. 
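`TokenListFiltered` above is a thin wrapper that turns the non-empty fields of `ACLTokenFilterOptions` into query parameters. A usage sketch against a default local agent; the `"web"` service name is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Only the non-empty fields are sent, each as its own query parameter
	// (authmethod, policy, role, servicename).
	opts := api.ACLTokenFilterOptions{ServiceName: "web"}
	tokens, _, err := client.ACL().TokenListFiltered(opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tokens {
		fmt.Println(t.AccessorID, t.Description) // no SecretIDs in list results
	}
}
```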
Segment string + + Filter string } // AgentServiceRegistration is used to register a new service @@ -343,6 +345,7 @@ type AgentServiceCheck struct { Method string `json:",omitempty"` Body string `json:",omitempty"` TCP string `json:",omitempty"` + TCPUseTLS bool `json:",omitempty"` UDP string `json:",omitempty"` Status string `json:",omitempty"` Notes string `json:",omitempty"` @@ -790,6 +793,10 @@ func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { r.params.Set("wan", "1") } + if opts.Filter != "" { + r.params.Set("filter", opts.Filter) + } + _, resp, err := a.c.doRequest(r) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 1fe0c71b61..f62c0c5a1b 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -1000,6 +1000,19 @@ func (r *request) toHTTP() (*http.Request, error) { return nil, err } + // Socket communications do not use a real host, so detect a slash at the + // start of the host name and replace the host with localhost. + // This is required since Go started validating req.Host in 1.20.6 and 1.19.11. + // Prior to that it would strip out the slashes for you; that behavior was + // removed and stricter validation added as part of a CVE fix. + // The issue is being tracked by the Go team: + // https://github.com/golang/go/issues/61431 + // If there is a resolution in that issue, we will remove this code. + // In the meantime, this is the accepted workaround. + if strings.HasPrefix(r.url.Host, "/") { + r.url.Host = "localhost" + } + req.URL.Host = r.url.Host req.URL.Scheme = r.url.Scheme req.Host = r.url.Host diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index 125619b55d..405e92ef27 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -42,7 +42,6 @@ const ( BuiltinAWSLambdaExtension string = "builtin/aws/lambda" BuiltinExtAuthzExtension string = "builtin/ext-authz" BuiltinLuaExtension string = "builtin/lua" - BuiltinLocalRatelimitExtension string = "builtin/http/localratelimit" BuiltinPropertyOverrideExtension string = "builtin/property-override" BuiltinWasmExtension string = "builtin/wasm" // BuiltinValidateExtension should not be exposed directly or accepted as a valid configured diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go index e27974af3e..270f0d5641 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_jwt_provider.go @@ -7,6 +7,14 @@ import ( "time" ) +const ( + DiscoveryTypeStrictDNS ClusterDiscoveryType = "STRICT_DNS" + DiscoveryTypeStatic ClusterDiscoveryType = "STATIC" + DiscoveryTypeLogicalDNS ClusterDiscoveryType = "LOGICAL_DNS" + DiscoveryTypeEDS ClusterDiscoveryType = "EDS" + DiscoveryTypeOriginalDST ClusterDiscoveryType = "ORIGINAL_DST" +) + type JWTProviderConfigEntry struct { // Kind is the kind of configuration entry and must be "jwt-provider". Kind string `json:",omitempty"` @@ -188,6 +196,71 @@ type RemoteJWKS struct { // // There is no retry by default. RetryPolicy *JWKSRetryPolicy `json:",omitempty" alias:"retry_policy"` + + // JWKSCluster defines how the specified Remote JWKS URI is to be fetched.
+ JWKSCluster *JWKSCluster `json:",omitempty" alias:"jwks_cluster"` +} + +type JWKSCluster struct { + // DiscoveryType refers to the service discovery type to use for resolving the cluster. + // + // This defaults to STRICT_DNS. + // Other options include STATIC, LOGICAL_DNS, EDS or ORIGINAL_DST. + DiscoveryType ClusterDiscoveryType `json:",omitempty" alias:"discovery_type"` + + // TLSCertificates refers to the data containing certificate authority certificates to use + // in verifying a presented peer certificate. + // If not specified and a peer certificate is presented it will not be verified. + // + // Must be either CaCertificateProviderInstance or TrustedCA. + TLSCertificates *JWKSTLSCertificate `json:",omitempty" alias:"tls_certificates"` + + // The timeout for new network connections to hosts in the cluster. + // If not set, a default value of 5s will be used. + ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` +} + +type ClusterDiscoveryType string + +// JWKSTLSCertificate refers to the data containing certificate authority certificates to use +// in verifying a presented peer certificate. +// If not specified and a peer certificate is presented it will not be verified. +// +// Must be either CaCertificateProviderInstance or TrustedCA. +type JWKSTLSCertificate struct { + // CaCertificateProviderInstance Certificate provider instance for fetching TLS certificates. + CaCertificateProviderInstance *JWKSTLSCertProviderInstance `json:",omitempty" alias:"ca_certificate_provider_instance"` + + // TrustedCA defines TLS certificate data containing certificate authority certificates + // to use in verifying a presented peer certificate. + // + // Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified. + TrustedCA *JWKSTLSCertTrustedCA `json:",omitempty" alias:"trusted_ca"` +} + +// JWKSTLSCertTrustedCA defines TLS certificate data containing certificate authority certificates +// to use in verifying a presented peer certificate. +// +// Exactly one of Filename, EnvironmentVariable, InlineString or InlineBytes must be specified. +type JWKSTLSCertTrustedCA struct { + Filename string `json:",omitempty" alias:"filename"` + EnvironmentVariable string `json:",omitempty" alias:"environment_variable"` + InlineString string `json:",omitempty" alias:"inline_string"` + InlineBytes []byte `json:",omitempty" alias:"inline_bytes"` +} + +type JWKSTLSCertProviderInstance struct { + // InstanceName refers to the certificate provider instance name + // + // The default value is "default". + InstanceName string `json:",omitempty" alias:"instance_name"` + + // CertificateName is used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. + // + // The default value is the empty string. 
+ CertificateName string `json:",omitempty" alias:"certificate_name"` } type JWKSRetryPolicy struct { diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 932317fdb0..a023002046 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -67,6 +67,7 @@ type HealthCheckDefinition struct { TLSServerName string TLSSkipVerify bool TCP string + TCPUseTLS bool UDP string GRPC string OSService string diff --git a/vendor/github.com/hashicorp/consul/api/operator_audit.go b/vendor/github.com/hashicorp/consul/api/operator_audit.go new file mode 100644 index 0000000000..5240d38a70 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_audit.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// The /v1/operator/audit-hash endpoint is available only in Consul Enterprise and +// interacts with its audit logging subsystem. + +package api + +type AuditHashRequest struct { + Input string +} + +type AuditHashResponse struct { + Hash string +} + +func (op *Operator) AuditHash(a *AuditHashRequest, q *QueryOptions) (*AuditHashResponse, error) { + r := op.c.newRequest("POST", "/v1/operator/audit-hash") + r.setQueryOptions(q) + r.obj = a + + rtt, resp, err := op.c.doRequest(r) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out AuditHashResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index 393d6fb3c5..d72c00c97b 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -28,6 +28,9 @@ type RaftServer struct { // it's a non-voting server, which will be added in a future release of // Consul. Voter bool + + // LastIndex is the last log index this server has a record of in its Raft log. + LastIndex uint64 } // RaftConfiguration is returned when querying for the current Raft configuration. diff --git a/vendor/github.com/hashicorp/consul/api/operator_usage.go b/vendor/github.com/hashicorp/consul/api/operator_usage.go index e47d4b53e0..8977449ddd 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_usage.go +++ b/vendor/github.com/hashicorp/consul/api/operator_usage.go @@ -10,6 +10,7 @@ type Usage struct { // ServiceUsage contains information about the number of services and service instances for a datacenter.
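A corresponding usage sketch for the new `AuditHash` call above. The endpoint is Consul Enterprise-only, so against OSS Consul the call returns an error; that the returned hash matches what the audit log emits is my reading of the doc comment, not something the patch states.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Enterprise-only endpoint; returns the hash computed for the given input.
	resp, err := client.Operator().AuditHash(&api.AuditHashRequest{Input: "my-value"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Hash)
}
```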
type ServiceUsage struct { + Nodes int Services int ServiceInstances int ConnectServiceInstances map[string]int diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index bb40e6a7fd..8ebc852f3a 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -32,11 +32,11 @@ type QueryFailoverTarget struct { // Partition specifies a partition to try during failover // Note: Partitions are available only in Consul Enterprise - Partition string + Partition string `json:",omitempty"` // Namespace specifies a namespace to try during failover // Note: Namespaces are available only in Consul Enterprise - Namespace string + Namespace string `json:",omitempty"` } // QueryDNSOptions controls settings when query results are served over DNS. diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 7f7595dcdf..7824780c34 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -819,6 +819,7 @@ type AlertmanagerConfig struct { ServiceDiscoveryConfigs discovery.Configs `yaml:"-"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` // The URL scheme to use when talking to Alertmanagers. Scheme string `yaml:"scheme,omitempty"` @@ -854,6 +855,13 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er return err } + httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || + c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil + + if httpClientConfigAuthEnabled && c.SigV4Config != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + } + // Check for users putting URLs in target groups. if len(c.RelabelConfigs) == 0 { if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 8b304a0faf..4d6027691f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -180,11 +180,9 @@ func (m *Manager) Providers() []*Provider { // Run starts the background processing. func (m *Manager) Run() error { go m.sender() - for range m.ctx.Done() { - m.cancelDiscoverers() - return m.ctx.Err() - } - return nil + <-m.ctx.Done() + m.cancelDiscoverers() + return m.ctx.Err() } // SyncCh returns a read only channel used by all the clients to receive target updates. diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index d3f013935c..41873278cb 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -15,6 +15,7 @@ package histogram import ( "fmt" + "math" "strings" ) @@ -130,6 +131,55 @@ func (h *FloatHistogram) String() string { return sb.String() } +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests.
+// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. + + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + // ZeroBucket returns the zero bucket. func (h *FloatHistogram) ZeroBucket() Bucket[float64] { return Bucket[float64]{ @@ -159,7 +209,7 @@ func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { return h } -// Div works like Scale but divides instead of multiplies. +// Div works like Mul but divides instead of multiplies. // When dividing by 0, everything will be set to Inf. func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar @@ -218,23 +268,17 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. 
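To make `TestExpression` above concrete, here is a small histogram and the string it should produce, worked out from the field-by-field checks in the hunk (zero-valued schema, zero-bucket, and negative fields are omitted; a single positive span with offset 0 contributes only its bucket list). The exact output is my derivation from the code shown, not a quoted upstream test:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Count:           6,
		Sum:             3,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{2, 4},
	}
	// Schema 0, the zero bucket, and the (empty) negative side are all
	// zero-valued, so they are omitted from the expression.
	fmt.Println(h.TestExpression())
	// Expected: {{count:6 sum:3 buckets:[2 4]}}
}
```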
- var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -245,25 +289,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - // TODO(beorn7): If needed, this can be optimized by inspecting the - // spans in other and create missing buckets in h in batches. - var iInSpan, index int32 - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index - } - for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { - b := it.At() - b.Count *= -1 - h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( - b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, - ) - index = b.Index + otherPositiveSpans := other.PositiveSpans + otherPositiveBuckets := other.PositiveBuckets + otherNegativeSpans := other.NegativeSpans + otherNegativeBuckets := other.NegativeBuckets + if other.Schema != h.Schema { + otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) + otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -298,103 +334,6 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { return true } -// addBucket takes the "coordinates" of the last bucket that was handled and -// adds the provided bucket after it. If a corresponding bucket exists, the -// count is added. If not, the bucket is inserted. The updated slices and the -// coordinates of the inserted or added-to bucket are returned. 
-func addBucket( - b Bucket[float64], - spans []Span, buckets []float64, - iSpan, iBucket int, - iInSpan, index int32, -) ( - newSpans []Span, newBuckets []float64, - newISpan, newIBucket int, newIInSpan int32, -) { - if iSpan == -1 { - // First add, check if it is before all spans. - if len(spans) == 0 || spans[0].Offset > b.Index { - // Add bucket before all others. - buckets = append(buckets, 0) - copy(buckets[1:], buckets) - buckets[0] = b.Count - if len(spans) > 0 && spans[0].Offset == b.Index+1 { - spans[0].Length++ - spans[0].Offset-- - return spans, buckets, 0, 0, 0 - } - spans = append(spans, Span{}) - copy(spans[1:], spans) - spans[0] = Span{Offset: b.Index, Length: 1} - if len(spans) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spans[1].Offset -= b.Index + 1 - } - return spans, buckets, 0, 0, 0 - } - if spans[0].Offset == b.Index { - // Just add to first bucket. - buckets[0] += b.Count - return spans, buckets, 0, 0, 0 - } - // We are behind the first bucket, so set everything to the - // first bucket and continue normally. - iSpan, iBucket, iInSpan = 0, 0, 0 - index = spans[0].Offset - } - deltaIndex := b.Index - index - for { - remainingInSpan := int32(spans[iSpan].Length) - iInSpan - if deltaIndex < remainingInSpan { - // Bucket is in current span. - iBucket += int(deltaIndex) - iInSpan += deltaIndex - buckets[iBucket] += b.Count - return spans, buckets, iSpan, iBucket, iInSpan - } - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). - buckets = append(buckets, 0) - copy(buckets[iBucket+1:], buckets[iBucket:]) - buckets[iBucket] = b.Count - if deltaIndex == 0 { - // Directly after previous span, extend previous span. - if iSpan < len(spans) { - spans[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spans[iSpan].Length) - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { - // Directly before next span, extend next span. - iInSpan = 0 - spans[iSpan].Offset-- - spans[iSpan].Length++ - return spans, buckets, iSpan, iBucket, iInSpan - } - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spans) { - spans[iSpan].Offset -= deltaIndex + 1 - } - spans = append(spans, Span{}) - copy(spans[iSpan+1:], spans[iSpan:]) - spans[iSpan] = Span{Length: 1, Offset: deltaIndex} - return spans, buckets, iSpan, iBucket, iInSpan - } - // Try start of next span. - deltaIndex -= spans[iSpan].Offset - iInSpan = 0 - } -} - // Compact eliminates empty buckets at the beginning and end of each span, then // merges spans that are consecutive or at most maxEmptyBuckets apart, and // finally splits spans that contain more consecutive empty buckets than @@ -1033,3 +972,133 @@ func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, ta return targetSpans, targetBuckets } + +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. 
+// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { + var ( + iSpan int = -1 + iBucket int = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && getBound(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } else { + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break + } else { + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. 
+ deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 0c27e15c72..3dc3049b1c 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -19,6 +19,7 @@ import ( "bytes" "encoding/json" "strconv" + "strings" "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" @@ -362,7 +363,7 @@ func EmptyLabels() Labels { func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) set = append(set, ls...) - slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return set } @@ -386,7 +387,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) return res } @@ -591,7 +592,7 @@ func (b *Builder) Labels() Labels { } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } return res } @@ -618,7 +619,7 @@ func (b *ScratchBuilder) Add(name, value string) { // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } // Assign is for when you already have a Labels which you want this ScratchBuilder to return. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index a87545a26b..cc6bfcc700 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -20,6 +20,7 @@ import ( "encoding/json" "reflect" "strconv" + "strings" "unsafe" "github.com/cespare/xxhash/v2" @@ -412,7 +413,7 @@ func yoloBytes(s string) (b []byte) { // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { - slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) size := labelsSize(ls) buf := make([]byte, size) marshalLabelsToSizedBuffer(ls, buf) @@ -671,7 +672,7 @@ func (b *Builder) Labels() Labels { return b.base } - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) slices.Sort(b.del) a, d := 0, 0 @@ -830,7 +831,7 @@ func (b *ScratchBuilder) Add(name, value string) { // Sort the labels added so far by name. 
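The repeated `slices.SortFunc` rewrites above all stem from one change in `golang.org/x/exp/slices`: the comparator moved from a less-style `func(a, b E) bool` to a three-way `func(a, b E) int`, which `strings.Compare` supplies directly. A minimal before/after sketch on plain strings:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"job", "__name__", "instance"}

	// Old API (removed upstream): less-style comparator returning bool.
	//   slices.SortFunc(names, func(a, b string) bool { return a < b })

	// New API: three-way comparator returning an int (<0, 0, >0).
	slices.SortFunc(names, func(a, b string) int { return strings.Compare(a, b) })

	fmt.Println(names) // [__name__ instance job]
}
```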
func (b *ScratchBuilder) Sort() { - slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) } // Assign is for when you already have a Labels which you want this ScratchBuilder to return. diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 30b3face0d..03cbd8849c 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -173,17 +173,11 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { }) } if r.Record.Value == "" && r.Alert.Value == "" { - if r.Record.Value == "0" { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Alert, - }) - } else { - nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), - node: &r.Record, - }) - } + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("one of 'record' or 'alert' must be set"), + node: &r.Record, + nodeAlt: &r.Alert, + }) } if r.Expr.Value == "" { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index e0833636f1..5623e6833f 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -338,7 +338,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) @@ -391,7 +391,7 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { return fmt.Errorf("invalid exemplar timestamp %f", ts) @@ -461,7 +461,7 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.exemplarVal) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 94338a6660..04c295dd00 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -348,7 +348,7 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, p.parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. if math.IsNaN(p.val) { @@ -361,7 +361,7 @@ func (p *PromParser) Next() (Entry, error) { case tTimestamp: p.hasTS = true if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { - return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if t2 := p.nextToken(); t2 != tLinebreak { return EntryInvalid, p.parseError("expected next entry after timestamp", t2) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index c111bb0657..fbb84a2bd3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -56,6 +56,10 @@ type ProtobufParser struct { fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram. + // exemplarReturned is set to true each time an exemplar has been + // returned, and set back to false upon each Next() call. + exemplarReturned bool + // state is marked by the entry we are processing. EntryInvalid implies // that we have to decode the next MetricFamily. state Entry @@ -293,8 +297,12 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Exemplar writes the exemplar of the current sample into the passed // exemplar. It returns if an exemplar exists or not. In case of a native // histogram, the legacy bucket section is still used for exemplars. To ingest -// all examplars, call the Exemplar method repeatedly until it returns false. +// all exemplars, call the Exemplar method repeatedly until it returns false. func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.exemplarReturned && p.state == EntrySeries { + // We only ever return one exemplar per (non-native-histogram) series. + return false + } m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar switch p.mf.GetType() { @@ -335,6 +343,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { } p.builder.Sort() ex.Labels = p.builder.Labels() + p.exemplarReturned = true return true } @@ -342,6 +351,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. 
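The `%v` to `%w` switches in the textparse hunks above matter because `%w` keeps the original error in the chain, so callers can still match it with `errors.Is`/`errors.As`. A self-contained illustration:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseFloat("abc", 64)

	flattened := fmt.Errorf("%v while parsing: %q", err, "abc") // text only
	wrapped := fmt.Errorf("%w while parsing: %q", err, "abc")   // keeps the chain

	var numErr *strconv.NumError
	fmt.Println(errors.As(flattened, &numErr)) // false
	fmt.Println(errors.As(wrapped, &numErr))   // true
}
```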
func (p *ProtobufParser) Next() (Entry, error) { + p.exemplarReturned = false switch p.state { case EntryInvalid: p.metricPos = 0 diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 891372c43e..af55799337 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" @@ -640,6 +641,17 @@ func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metri if err != nil { return nil, err } + t := client.Transport + + if cfg.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport) + if err != nil { + return nil, err + } + } + + client.Transport = t + s := &alertmanagerSet{ client: client, cfg: cfg, diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 816f20721e..161aa85acb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -44,6 +44,7 @@ import ( "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -59,6 +60,11 @@ const ( maxInt64 = 9223372036854774784 // The smallest SampleValue that can be converted to an int64 without underflow. minInt64 = -9223372036854775808 + + // Max initial size for the pooled points slices. + // The getHPointSlice and getFPointSlice functions are called with an estimated size which often can be + // over-estimated. + maxPointsSliceSize = 5000 ) type engineMetrics struct { @@ -573,7 +579,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // // At this point per query only one EvalStmt is evaluated. Alert and record // statements are not handled by the Engine. -func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { +func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annotations.Annotations, err error) { ng.metrics.currentQueries.Inc() defer func() { ng.metrics.currentQueries.Dec() @@ -666,17 +672,17 @@ func durationMilliseconds(d time.Duration) int64 { } // execEvalStmt evaluates the expression of an evaluation statement for the given time range. 
-func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) mint, maxt := ng.findMinMaxTime(s) - querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) + querier, err := query.queryable.Querier(mint, maxt) if err != nil { prepareSpanTimer.Finish() return nil, nil, err } defer querier.Close() - ng.populateSeries(querier, s) + ng.populateSeries(ctxPrepare, querier, s) prepareSpanTimer.Finish() // Modify the offset of vector and matrix selectors for the @ modifier @@ -890,7 +896,7 @@ func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { return interval } -func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { +func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // The evaluation of the VectorSelector inside then evaluates the given range and unsets // the variable. @@ -913,7 +919,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { } evalRange = 0 hints.By, hints.Grouping = extractGroupsFromPath(path) - n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) + n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...) case *parser.MatrixSelector: evalRange = n.Range @@ -952,7 +958,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } -func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) { +func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: return checkAndExpandSeriesSet(ctx, e.VectorSelector) @@ -967,7 +973,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.War return nil, nil } -func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) { +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws annotations.Annotations, err error) { for it.Next() { select { case <-ctx.Done(): @@ -981,7 +987,7 @@ func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.S type errWithWarnings struct { err error - warnings storage.Warnings + warnings annotations.Annotations } func (e errWithWarnings) Error() string { return e.err.Error() } @@ -1016,7 +1022,7 @@ func (ev *evaluator) error(err error) { } // recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { +func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp *error) { e := recover() if e == nil { return @@ -1032,7 +1038,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err - *ws = append(*ws, err.warnings...) 
+ ws.Merge(err.warnings) case error: *errp = err default: @@ -1040,7 +1046,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { +func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) v, ws = ev.eval(expr) @@ -1109,19 +1115,19 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { +func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) originalNumSamples := ev.currentSamples - var warnings storage.Warnings + var warnings annotations.Annotations for i, e := range exprs { // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. val, ws := ev.eval(e) - warnings = append(warnings, ws...) + warnings.Merge(ws) matrixes[i] = val.(Matrix) // Keep a copy of the original point slices so that they @@ -1188,41 +1194,24 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Floats { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Floats = series.Floats[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) + matrixes[i][si].Histograms = series.Histograms[1:] + default: + continue } - for _, point := range series.Histograms { - if point.T == ts { - if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. 
- matrixes[i][si].Histograms = series.Histograms[1:] - ev.currentSamples++ - } else { - ev.error(ErrTooManySamples(env)) - } - } - break + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } args[i] = vectors[i] @@ -1233,7 +1222,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) enh.Ts = ts result, ws := funcCall(args, bufHelpers, enh) enh.Out = result[:0] // Reuse result vector. - warnings = append(warnings, ws...) + warnings.Merge(ws) ev.currentSamples += len(result) // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also @@ -1310,7 +1299,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { +func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() @@ -1343,7 +1332,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { +func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. 
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { @@ -1372,17 +1361,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { param := unwrapStepInvariantExpr(e.Param) unwrapParenExpr(¶m) if s, ok := param.(*parser.StringLiteral); ok { - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.aggregation(e, sortedGrouping, s.Val, v[0].(Vector), sh[0], enh) }, e.Expr) } - return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var param float64 if e.Param != nil { param = v[0].(Vector)[0].F } - return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil + return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh) }, e.Param, e.Expr) case *parser.Call: @@ -1404,7 +1393,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { var ( matrixArgIndex int matrixArg bool - warnings storage.Warnings + warnings annotations.Annotations ) for i := range e.Args { unwrapParenExpr(&e.Args[i]) @@ -1422,7 +1411,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Replacing parser.SubqueryExpr with parser.MatrixSelector. val, totalSamples, ws := ev.evalSubquery(subq) e.Args[i] = val - warnings = append(warnings, ws...) + warnings.Merge(ws) defer func() { // subquery result takes space in the memory. Get rid of that at the end. val.VectorSelector.(*parser.VectorSelector).Series = nil @@ -1433,8 +1422,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return call(v, e.Args, enh), warnings + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec, annos := call(v, e.Args, enh) + return vec, warnings.Merge(annos) }, e.Args...) } @@ -1448,7 +1438,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] - warnings = append(warnings, ws...) + warnings.Merge(ws) } } @@ -1459,7 +1449,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { selVS := sel.VectorSelector.(*parser.VectorSelector) ws, err := checkAndExpandSeriesSet(ev.ctx, sel) - warnings = append(warnings, ws...) + warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) } @@ -1522,8 +1512,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. 
- outVec := call(inArgs, e.Args, enh) + outVec, annos := call(inArgs, e.Args, enh) + warnings.Merge(annos) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) + enh.Out = outVec[:0] if len(outVec) > 0 { if outVec[0].H == nil { @@ -1626,7 +1618,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1639,36 +1631,36 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { + return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case 
*parser.NumberLiteral:
-		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 			return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil
 		})
@@ -1834,7 +1826,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 	panic(fmt.Errorf("unhandled expression of type: %T", expr))
 }
 
-func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) {
+func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
 	ws, err := checkAndExpandSeriesSet(ev.ctx, vs)
 	if err != nil {
 		ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@@ -1846,7 +1838,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
 		seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
 	}
 
-	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
+	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 		if vs.Timestamp != nil {
 			// This is a special case for "timestamp()" when the @ modifier is used, to ensure that
 			// we return a point for each time step in this case.
@@ -1874,7 +1866,8 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
 			}
 		}
 		ev.samplesStats.UpdatePeak(ev.currentSamples)
-		return call([]parser.Value{vec}, e.Args, enh), ws
+		vec, annos := call([]parser.Value{vec}, e.Args, enh)
+		return vec, ws.Merge(annos)
 	})
 }
 
@@ -1922,19 +1915,33 @@ func getFPointSlice(sz int) []FPoint {
 	if p := fPointPool.Get(); p != nil {
 		return p
 	}
+
+	if sz > maxPointsSliceSize {
+		sz = maxPointsSliceSize
+	}
+
 	return make([]FPoint, 0, sz)
 }
 
+// putFPointSlice returns the given FPoint slice to the pool for reuse.
+// The slice is truncated to zero length before it is stored.
 func putFPointSlice(p []FPoint) {
 	if p != nil {
 		fPointPool.Put(p[:0])
 	}
 }
 
+// getHPointSlice will return an HPoint slice of capacity min(sz, maxPointsSliceSize).
+// This function is called with an estimated size which can often be over-estimated.
 func getHPointSlice(sz int) []HPoint {
 	if p := hPointPool.Get(); p != nil {
 		return p
 	}
+
+	if sz > maxPointsSliceSize {
+		sz = maxPointsSliceSize
+	}
+
 	return make([]HPoint, 0, sz)
 }
 
@@ -1945,7 +1952,7 @@ func putHPointSlice(p []HPoint) {
 }
 
 // matrixSelector evaluates a *parser.MatrixSelector expression.
-func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) {
+func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
 	var (
 		vs = node.VectorSelector.(*parser.VectorSelector)
@@ -2525,7 +2532,10 @@ type groupedAggregation struct {
 
 // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
 // must be sorted.
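// The rewrite below passes the whole *parser.AggregateExpr rather than its
// unpacked fields: aggregation now needs e.Expr and e.Param to attach
// posrange.PositionRange information to the warnings it emits, and it gains an
// annotations.Annotations return value. The resulting call shape, as it appears
// in the rangeEval callback earlier in this patch:
//
//	return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh) // (Vector, annotations.Annotations)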
-func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { +func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + op := e.Op + without := e.Without + annos := annotations.Annotations{} result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2536,7 +2546,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } k = int64(f) if k < 1 { - return Vector{} + return Vector{}, annos } } var q float64 @@ -2789,7 +2799,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.AVG: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. + metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) continue } if aggr.hasHistogram { @@ -2834,12 +2845,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without continue // Bypass default append. case parser.QUANTILE: + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange())) + } aggr.floatValue = quantile(q, aggr.heap) case parser.SUM: if aggr.hasFloat && aggr.hasHistogram { // We cannot aggregate histogram sample with a float64 sample. - // TODO(zenador): Issue warning when plumbing is in place. + metricName := aggr.labels.Get(labels.MetricName) + annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange())) continue } if aggr.hasHistogram { @@ -2855,7 +2870,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without H: aggr.histogramValue, }) } - return enh.Out + return enh.Out, annos } // groupingKey builds and returns the grouping key for the given metric and diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 96bffab96d..8eb0cad8ad 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -28,6 +28,8 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" ) // FunctionCall is the type of a PromQL function implementation @@ -51,20 +53,20 @@ import ( // metrics, the timestamp are not needed. // // Scalar results should be returned as the value of a sample in a Vector. -type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector +type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, - }} + }}, nil } // extrapolatedRate is a utility function for rate/increase/delta. 
// It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( @@ -75,14 +77,19 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram *histogram.FloatHistogram firstT, lastT int64 numSamplesMinusOne int + annos = annotations.Annotations{} ) // We need either at least two Histograms and no Floats, or at least two // Floats and no Histograms to calculate a rate. Otherwise, drop this // Vector element. + metricName := samples.Metric.Get(labels.MetricName) if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { - // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason. - return enh.Out + return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) + } + + if isCounter && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") { + annos.Add(annotations.NewPossibleNonCounterInfo(metricName, args[0].PositionRange())) } switch { @@ -90,11 +97,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod numSamplesMinusOne = len(samples.Histograms) - 1 firstT = samples.Histograms[0].T lastT = samples.Histograms[numSamplesMinusOne].T - resultHistogram = histogramRate(samples.Histograms, isCounter) + var newAnnos annotations.Annotations + resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) if resultHistogram == nil { // The histograms are not compatible with each other. - // TODO(beorn7): Communicate this failure reason. - return enh.Out + return enh.Out, annos.Merge(newAnnos) } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -113,8 +120,8 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod prevValue = currPoint.F } default: - // Not enough samples. TODO(beorn7): Communicate this failure reason. - return enh.Out + // TODO: add RangeTooShortWarning + return enh.Out, annos } // Duration between first/last samples and boundary of range. @@ -165,17 +172,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod resultHistogram.Mul(factor) } - return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}), annos } // histogramRate is a helper function for extrapolatedRate. It requires // points[0] to be a histogram. It returns nil if any other Point in points is -// not a histogram. -func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { +// not a histogram, and a warning wrapped in an annotation in that case. +// Otherwise, it returns the calculated histogram and an empty annotation. 
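// Note the constructor chain the body below uses to report that failure:
// annotations.New() yields an empty set and Add returns the set with the
// warning included, so a single-warning Annotations value can be built and
// returned inline:
//
//	return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))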
+func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H last := points[len(points)-1].H if last == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } minSchema := prev.Schema if last.Schema < minSchema { @@ -190,7 +198,7 @@ func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { - return nil // Range contains a mix of histograms and floats. + return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. if !isCounter { @@ -216,40 +224,41 @@ func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { } h.CounterResetHint = histogram.GaugeType - return h.Compact(0) + return h.Compact(0), nil } -// === delta(Matrix parser.ValueTypeMatrix) Vector === -func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, false, false) } -// === rate(node parser.ValueTypeMatrix) Vector === -func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, true) } -// === increase(node parser.ValueTypeMatrix) Vector === -func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === increase(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return extrapolatedRate(vals, args, enh, true, false) } -// === irate(node parser.ValueTypeMatrix) Vector === -func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, true) } -// === idelta(node model.ValMatrix) Vector === -func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === idelta(node model.ValMatrix) (Vector, Annotations) === +func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return instantValue(vals, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { +func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. 
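// instantValue backs both irate and idelta and looks only at the two most
// recent samples in the range. With the last value v2 at t2 and the previous
// value v1 at t1 (timestamps in milliseconds), and assuming no counter reset
// in between, the code below computes:
//
//	idelta = v2 - v1
//	irate  = (v2 - v1) / ((t2 - t1) / 1000) // per second, hence the /1000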
+	// TODO: add RangeTooShortWarning
 	if len(samples.Floats) < 2 {
-		return out
+		return out, nil
 	}
 
 	lastSample := samples.Floats[len(samples.Floats)-1]
@@ -266,7 +275,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector {
 	sampledInterval := lastSample.T - previousSample.T
 	if sampledInterval == 0 {
 		// Avoid dividing by 0.
-		return out
+		return out, nil
 	}
 
 	if isRate {
@@ -274,7 +283,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector {
 		resultValue /= float64(sampledInterval) / 1000
 	}
 
-	return append(out, Sample{F: resultValue})
+	return append(out, Sample{F: resultValue}), nil
 }
 
 // Calculate the trend value at the given index i in raw data d.
@@ -299,7 +308,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
 // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
 // how trends in historical data will affect the current data. A higher trend factor increases the influence
 // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
-func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	samples := vals[0].(Matrix)[0]
 
 	// The smoothing factor argument.
@@ -320,7 +329,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
 
 	// Can't do the smoothing operation with less than two points.
 	if l < 2 {
-		return enh.Out
+		return enh.Out, nil
 	}
 
 	var s0, s1, b float64
@@ -342,34 +351,34 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
 		s0, s1 = s1, x+y
 	}
 
-	return append(enh.Out, Sample{F: s1})
+	return append(enh.Out, Sample{F: s1}), nil
 }
 
-// === sort(node parser.ValueTypeVector) Vector ===
-func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
+func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	// NaN should sort to the bottom, so take descending sort with NaN first and
 	// reverse it.
 	byValueSorter := vectorByReverseValueHeap(vals[0].(Vector))
 	sort.Sort(sort.Reverse(byValueSorter))
-	return Vector(byValueSorter)
+	return Vector(byValueSorter), nil
 }
 
-// === sortDesc(node parser.ValueTypeVector) Vector ===
-func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
+func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	// NaN should sort to the bottom, so take ascending sort with NaN first and
 	// reverse it.
byValueSorter := vectorByValueHeap(vals[0].(Vector)) sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) + return Vector(byValueSorter), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) min := vals[1].(Vector)[0].F max := vals[2].(Vector)[0].F if max < min { - return enh.Out + return enh.Out, nil } for _, el := range vec { enh.Out = append(enh.Out, Sample{ @@ -377,11 +386,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper F: math.Max(min, math.Min(max, el.F)), }) } - return enh.Out + return enh.Out, nil } -// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === +func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) max := vals[1].(Vector)[0].F for _, el := range vec { @@ -390,11 +399,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel F: math.Min(max, el.F), }) } - return enh.Out + return enh.Out, nil } -// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === +func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) min := vals[1].(Vector)[0].F for _, el := range vec { @@ -403,11 +412,11 @@ func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel F: math.Max(min, el.F), }) } - return enh.Out + return enh.Out, nil } -// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) Vector === -func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === +func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. 
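// The bulk of this file's churn follows the same mechanical pattern as the
// hunks above: every FunctionCall implementation gains an
// annotations.Annotations return value, and functions with nothing to report
// return nil for it. A minimal sketch of a function in the new shape
// (funcExample is a hypothetical name, not part of this patch):
//
//	func funcExample(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
//		for _, el := range vals[0].(Vector) {
//			enh.Out = append(enh.Out, Sample{Metric: el.Metric, F: el.F})
//		}
//		return enh.Out, nil // nil means "no annotations to report"
//	}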
@@ -425,16 +434,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper F: f, }) } - return enh.Out + return enh.Out, nil } // === Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{F: math.NaN()}) + return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}) + return append(enh.Out, Sample{F: v[0].F}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -449,13 +458,14 @@ func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Seri return append(enh.Out, Sample{H: aggrFn(el)}) } -// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. 
 	return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
 		count := 1
@@ -475,7 +485,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 			}
 		}
 		return mean
-	})
+	}), nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
@@ -505,18 +515,18 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 			return mean
 		}
 		return mean + c
-	})
+	}), nil
 }
 
-// === count_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		return float64(len(s.Floats) + len(s.Histograms))
-	})
+	}), nil
 }
 
-// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	el := vals[0].(Matrix)[0]
 
 	var f FPoint
@@ -533,22 +543,22 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		return append(enh.Out, Sample{
 			Metric: el.Metric,
 			F:      f.F,
-		})
+		}), nil
 	}
 	return append(enh.Out, Sample{
 		Metric: el.Metric,
 		H:      h.H,
-	})
+	}), nil
 }
 
-// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	if len(vals[0].(Matrix)[0].Floats) == 0 {
 		// TODO(beorn7): The passed values only contain
 		// histograms. max_over_time ignores histograms for now. If
 		// there are only histograms, we have to return without adding
 		// anything to enh.Out.
-		return enh.Out
+		return enh.Out, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		max := s.Floats[0].F
@@ -558,17 +568,17 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
 			}
 		}
 		return max
-	})
+	}), nil
 }
 
-// === min_over_time(Matrix parser.ValueTypeMatrix) Vector ===
-func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	if len(vals[0].(Matrix)[0].Floats) == 0 {
 		// TODO(beorn7): The passed values only contain
 		// histograms. min_over_time ignores histograms for now. If
 		// there are only histograms, we have to return without adding
 		// anything to enh.Out.
- return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { min := s.Floats[0].F @@ -578,16 +588,17 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return min - }) + }), nil } -// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { - // TODO(zenador): Add warning for mixed floats and histograms. - return enh.Out +// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + firstSeries := vals[0].(Matrix)[0] + if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 { + metricName := firstSeries.Metric.Get(labels.MetricName) + return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange())) } - if len(vals[0].(Matrix)[0].Floats) == 0 { + if len(firstSeries.Floats) == 0 { // The passed values only contain histograms. return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { sum := s.Histograms[0].H.Copy() @@ -601,7 +612,7 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return sum - }) + }), nil } return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 @@ -612,11 +623,11 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return sum } return sum + c - }) + }), nil } -// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === quantile_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { @@ -624,24 +635,29 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // histograms. quantile_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil + } + + annos := annotations.Annotations{} + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) } - return append(enh.Out, Sample{F: quantile(q, values)}) + return append(enh.Out, Sample{F: quantile(q, values)}), annos } -// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stddev_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. 
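// The loops below (here and in stdvar_over_time and sum_over_time around it)
// feed their running mean and auxiliary sums through kahanSumInc, the
// compensated-summation helper defined later in this file: it keeps an extra
// term that captures the low-order bits each floating-point add would
// otherwise lose. Sketched on a plain sum:
//
//	var sum, c float64 // c carries the rounding error of previous adds
//	for _, f := range s.Floats {
//		sum, c = kahanSumInc(f.F, sum, c)
//	}
//	total := sum + c // fold the compensation back in at the end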
- return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -654,17 +670,17 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }) + }), nil } -// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === -func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { // TODO(beorn7): The passed values only contain // histograms. stdvar_over_time ignores histograms for now. If // there are only histograms, we have to return without adding // anything to enh.Out. - return enh.Out + return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 @@ -677,35 +693,35 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }) + }), nil } -// === absent(Vector parser.ValueTypeVector) Vector === -func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Vector)) > 0 { - return enh.Out + return enh.Out, nil } return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), F: 1, - }) + }), nil } -// === absent_over_time(Vector parser.ValueTypeMatrix) Vector === +// === absent_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === // As this function has a matrix as argument, it does not get all the Series. // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. 
-func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, Sample{F: 1}) +func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return append(enh.Out, Sample{F: 1}), nil } -// === present_over_time(Vector parser.ValueTypeMatrix) Vector === -func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === +func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return 1 - }) + }), nil } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { @@ -720,127 +736,127 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6 return enh.Out } -// === abs(Vector parser.ValueTypeVector) Vector === -func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Abs) +// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Abs), nil } -// === ceil(Vector parser.ValueTypeVector) Vector === -func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Ceil) +// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Ceil), nil } -// === floor(Vector parser.ValueTypeVector) Vector === -func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Floor) +// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Floor), nil } -// === exp(Vector parser.ValueTypeVector) Vector === -func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Exp) +// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Exp), nil } -// === sqrt(Vector VectorNode) Vector === -func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sqrt) +// === sqrt(Vector VectorNode) (Vector, Annotations) === +func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sqrt), nil } -// === ln(Vector parser.ValueTypeVector) Vector === -func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log) +// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log), nil } -// === log2(Vector parser.ValueTypeVector) Vector 
=== -func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log2) +// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log2), nil } -// === log10(Vector parser.ValueTypeVector) Vector === -func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log10) +// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Log10), nil } -// === sin(Vector parser.ValueTypeVector) Vector === -func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sin) +// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sin), nil } -// === cos(Vector parser.ValueTypeVector) Vector === -func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cos) +// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cos), nil } -// === tan(Vector parser.ValueTypeVector) Vector === -func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tan) +// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tan), nil } -// == asin(Vector parser.ValueTypeVector) Vector === -func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asin) +// == asin(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asin), nil } -// == acos(Vector parser.ValueTypeVector) Vector === -func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acos) +// == acos(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acos), nil } -// == atan(Vector parser.ValueTypeVector) Vector === -func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atan) +// == atan(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atan), nil } -// == sinh(Vector parser.ValueTypeVector) Vector === -func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sinh) +// == sinh(Vector 
parser.ValueTypeVector) (Vector, Annotations) === +func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Sinh), nil } -// == cosh(Vector parser.ValueTypeVector) Vector === -func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Cosh) +// == cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Cosh), nil } -// == tanh(Vector parser.ValueTypeVector) Vector === -func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Tanh) +// == tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Tanh), nil } -// == asinh(Vector parser.ValueTypeVector) Vector === -func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Asinh) +// == asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Asinh), nil } -// == acosh(Vector parser.ValueTypeVector) Vector === -func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Acosh) +// == acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Acosh), nil } -// == atanh(Vector parser.ValueTypeVector) Vector === -func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Atanh) +// == atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return simpleFunc(vals, enh, math.Atanh), nil } -// === rad(Vector parser.ValueTypeVector) Vector === -func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 - }) + }), nil } -// === deg(Vector parser.ValueTypeVector) Vector === -func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi - }) + }), nil } // === pi() Scalar === -func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{F: math.Pi}} +func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return Vector{Sample{F: math.Pi}}, nil } -// === sgn(Vector parser.ValueTypeVector) 
Vector === -func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { switch { case v < 0: @@ -850,11 +866,11 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) default: return v } - }) + }), nil } -// === timestamp(Vector parser.ValueTypeVector) Vector === -func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { enh.Out = append(enh.Out, Sample{ @@ -862,7 +878,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe F: float64(el.T) / 1000, }) } - return enh.Out + return enh.Out, nil } func kahanSum(samples []float64) float64 { @@ -931,39 +947,39 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f return slope, intercept } -// === deriv(node parser.ValueTypeMatrix) Vector === -func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === +func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) - return append(enh.Out, Sample{F: slope}) + return append(enh.Out, Sample{F: slope}), nil } -// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === -func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) (Vector, Annotations) === +func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. 
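// predict_linear fits a least-squares line through the range's float samples;
// linearRegression is anchored at the evaluation timestamp enh.Ts, so the
// intercept is the fitted value "now", and the body below extrapolates it
// duration seconds ahead:
//
//	predicted = slope*duration + intercept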
if len(samples.Floats) < 2 { - return enh.Out + return enh.Out, nil } slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{F: slope*duration + intercept}) + return append(enh.Out, Sample{F: slope*duration + intercept}), nil } -// === histogram_count(Vector parser.ValueTypeVector) Vector === -func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -976,11 +992,11 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN F: sample.H.Count, }) } - return enh.Out + return enh.Out, nil } -// === histogram_sum(Vector parser.ValueTypeVector) Vector === -func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -993,11 +1009,77 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod F: sample.H.Sum, }) } - return enh.Out + return enh.Out, nil } -// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: math.Sqrt(variance), + }) + } + return enh.Out, nil +} + +// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
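// The loop below mirrors histogram_stddev above: each bucket's population is
// treated as sitting at one representative point, the geometric midpoint
// sqrt(bucket.Lower * bucket.Upper) (or 0 for a bucket spanning zero), and the
// spread of those points around the histogram's mean is accumulated with Kahan
// compensation. In effect:
//
//	mean     = sample.H.Sum / sample.H.Count
//	variance = (sum over buckets of count_b * (mid_b - mean)^2) / sample.H.Count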
+ if sample.H == nil { + continue + } + mean := sample.H.Sum / sample.H.Count + var variance, cVariance float64 + it := sample.H.AllBucketIterator() + for it.Next() { + bucket := it.At() + var val float64 + if bucket.Lower <= 0 && 0 <= bucket.Upper { + val = 0 + } else { + val = math.Sqrt(bucket.Upper * bucket.Lower) + } + delta := val - mean + variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) + } + variance += cVariance + variance /= sample.H.Count + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: variance, + }) + } + return enh.Out, nil +} + +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { lower := vals[0].(Vector)[0].F upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) @@ -1012,13 +1094,18 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev F: histogramFraction(lower, upper, sample.H), }) } - return enh.Out + return enh.Out, nil } -// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === -func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === +func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) + annos := annotations.Annotations{} + + if math.IsNaN(q) || q < 0 || q > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) + } if enh.signatureToMetricWithBuckets == nil { enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} @@ -1042,8 +1129,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { - // Oops, no bucket label or malformed label value. Skip. - // TODO(beorn7): Issue a warning somehow. + annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), args[1].PositionRange())) continue } enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) @@ -1069,7 +1155,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // At this data point, we have conventional histogram // buckets and a native histogram with the same name and // labels. Do not evaluate anything. - // TODO(beorn7): Issue a warning somehow. 
+ annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange())) delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) continue } @@ -1082,18 +1168,22 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { + res, forcedMonotonicity := bucketQuantile(q, mb.buckets) enh.Out = append(enh.Out, Sample{ Metric: mb.metric, - F: bucketQuantile(q, mb.buckets), + F: res, }) + if forcedMonotonicity { + annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange())) + } } } - return enh.Out + return enh.Out, annos } -// === resets(Matrix parser.ValueTypeMatrix) Vector === -func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 @@ -1120,17 +1210,17 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } } - return append(enh.Out, Sample{F: float64(resets)}) + return append(enh.Out, Sample{F: float64(resets)}), nil } -// === changes(Matrix parser.ValueTypeMatrix) Vector === -func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats changes := 0 if len(floats) == 0 { // TODO(beorn7): Only histogram values, still need to add support. - return enh.Out + return enh.Out, nil } prev := floats[0].F @@ -1142,11 +1232,11 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp prev = current } - return append(enh.Out, Sample{F: float64(changes)}) + return append(enh.Out, Sample{F: float64(changes)}), nil } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === +func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( vector = vals[0].(Vector) dst = stringFromArg(args[1]) @@ -1197,20 +1287,20 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod H: el.H, }) } - return enh.Out + return enh.Out, nil } -// === Vector(s Scalar) Vector === -func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === Vector(s Scalar) (Vector, Annotations) === +func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, F: vals[0].(Vector)[0].F, - }) + }), nil } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) 
Vector === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === +func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( vector = vals[0].(Vector) dst = stringFromArg(args[1]) @@ -1265,7 +1355,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe H: el.H, }) } - return enh.Out + return enh.Out, nil } // Common code for date related functions. @@ -1289,59 +1379,59 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) - }) + }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) - }) + }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Weekday()) - }) + }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) - }) + }), nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) - }) + }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) - }) + }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) - }) + }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { +func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) 
 		return float64(t.Year())
-	})
+	}), nil
 }

 // FunctionCalls is a list of all functions supported by PromQL, including their types.
@@ -1377,6 +1467,8 @@ var FunctionCalls = map[string]FunctionCall{
 	"histogram_fraction": funcHistogramFraction,
 	"histogram_quantile": funcHistogramQuantile,
 	"histogram_sum":      funcHistogramSum,
+	"histogram_stddev":   funcHistogramStdDev,
+	"histogram_stdvar":   funcHistogramStdVar,
 	"holt_winters":       funcHoltWinters,
 	"hour":               funcHour,
 	"idelta":             funcIdelta,
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index 86f1394998..58136266fd 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -20,6 +20,8 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )

 // Node is a generic interface for all nodes in an AST.
@@ -45,7 +47,7 @@ type Node interface {
 	Pretty(level int) string

 	// PositionRange returns the position of the AST Node in the query string.
-	PositionRange() PositionRange
+	PositionRange() posrange.PositionRange
 }

 // Statement is a generic interface for all statements.
@@ -94,7 +96,7 @@ type AggregateExpr struct {
 	Param    Expr     // Parameter used by some aggregators.
 	Grouping []string // The labels by which to group the Vector.
 	Without  bool     // Whether to drop the given labels rather than keep them.
-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // BinaryExpr represents a binary expression between two child expressions.
@@ -115,7 +117,7 @@ type Call struct {
 	Func *Function   // The function that was called.
 	Args Expressions // Arguments used in the call.

-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // MatrixSelector represents a Matrix selection.
@@ -125,7 +127,7 @@ type MatrixSelector struct {
 	VectorSelector Expr
 	Range          time.Duration

-	EndPos Pos
+	EndPos posrange.Pos
 }

 // SubqueryExpr represents a subquery.
@@ -143,27 +145,27 @@ type SubqueryExpr struct {
 	StartOrEnd ItemType // Set when @ is used with start() or end()
 	Step       time.Duration

-	EndPos Pos
+	EndPos posrange.Pos
 }

 // NumberLiteral represents a number.
 type NumberLiteral struct {
 	Val float64

-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // ParenExpr wraps an expression so it cannot be disassembled as a consequence
 // of operator precedence.
 type ParenExpr struct {
 	Expr Expr

-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // StringLiteral represents a string.
 type StringLiteral struct {
 	Val string

-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // UnaryExpr represents a unary operation on another expression.
@@ -172,7 +174,7 @@ type UnaryExpr struct {
 	Op   ItemType
 	Expr Expr

-	StartPos Pos
+	StartPos posrange.Pos
 }

 // StepInvariantExpr represents a query which evaluates to the same result
@@ -184,7 +186,9 @@ type StepInvariantExpr struct {

 func (e *StepInvariantExpr) String() string { return e.Expr.String() }

-func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() }
+func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
+	return e.Expr.PositionRange()
+}

 // VectorSelector represents a Vector selection.
 type VectorSelector struct {
@@ -204,7 +208,7 @@ type VectorSelector struct {
 	UnexpandedSeriesSet storage.SeriesSet
 	Series              []storage.Series

-	PosRange PositionRange
+	PosRange posrange.PositionRange
 }

 // TestStmt is an internal helper statement that allows execution
@@ -215,8 +219,8 @@ func (TestStmt) String() string { return "test statement" }
 func (TestStmt) PromQLStmt() {}
 func (t TestStmt) Pretty(int) string { return t.String() }

-func (TestStmt) PositionRange() PositionRange {
-	return PositionRange{
+func (TestStmt) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: -1,
 		End:   -1,
 	}
@@ -405,17 +409,11 @@ func Children(node Node) []Node {
 	}
 }

-// PositionRange describes a position in the input string of the parser.
-type PositionRange struct {
-	Start Pos
-	End   Pos
-}
-
 // mergeRanges is a helper function to merge the PositionRanges of two Nodes.
 // Note that the arguments must be in the same order as they
 // occur in the input string.
-func mergeRanges(first, last Node) PositionRange {
-	return PositionRange{
+func mergeRanges(first, last Node) posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: first.PositionRange().Start,
 		End:   last.PositionRange().End,
 	}
@@ -423,33 +421,33 @@ func mergeRanges(first, last Node) posrange.PositionRange {

 // Item implements the Node interface.
 // This makes it possible to call mergeRanges on them.
-func (i *Item) PositionRange() PositionRange {
-	return PositionRange{
+func (i *Item) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: i.Pos,
-		End:   i.Pos + Pos(len(i.Val)),
+		End:   i.Pos + posrange.Pos(len(i.Val)),
 	}
 }

-func (e *AggregateExpr) PositionRange() PositionRange {
+func (e *AggregateExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }

-func (e *BinaryExpr) PositionRange() PositionRange {
+func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 	return mergeRanges(e.LHS, e.RHS)
 }

-func (e *Call) PositionRange() PositionRange {
+func (e *Call) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }

-func (e *EvalStmt) PositionRange() PositionRange {
+func (e *EvalStmt) PositionRange() posrange.PositionRange {
 	return e.Expr.PositionRange()
 }

-func (e Expressions) PositionRange() PositionRange {
+func (e Expressions) PositionRange() posrange.PositionRange {
 	if len(e) == 0 {
 		// Position undefined.
-		return PositionRange{
+		return posrange.PositionRange{
 			Start: -1,
 			End:   -1,
 		}
@@ -457,39 +455,39 @@ func (e Expressions) PositionRange() posrange.PositionRange {
 	return mergeRanges(e[0], e[len(e)-1])
 }

-func (e *MatrixSelector) PositionRange() PositionRange {
-	return PositionRange{
+func (e *MatrixSelector) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.VectorSelector.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }

-func (e *SubqueryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *SubqueryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.Expr.PositionRange().Start,
 		End:   e.EndPos,
 	}
 }

-func (e *NumberLiteral) PositionRange() PositionRange {
+func (e *NumberLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }

-func (e *ParenExpr) PositionRange() PositionRange {
+func (e *ParenExpr) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }

-func (e *StringLiteral) PositionRange() PositionRange {
+func (e *StringLiteral) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }

-func (e *UnaryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *UnaryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
 		Start: e.StartPos,
 		End:   e.Expr.PositionRange().End,
 	}
 }

-func (e *VectorSelector) PositionRange() PositionRange {
+func (e *VectorSelector) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 479c7f635d..45a30219e6 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -173,6 +173,16 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
+	"histogram_stddev": {
+		Name:       "histogram_stddev",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
+	"histogram_stdvar": {
+		Name:       "histogram_stdvar",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"histogram_fraction": {
 		Name:       "histogram_fraction",
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector},
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index b28e9d544c..676fd9fb5b 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -21,23 +21,29 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
 )
 %}

 %union {
-	node      Node
-	item      Item
-	matchers  []*labels.Matcher
-	matcher   *labels.Matcher
-	label     labels.Label
-	labels    labels.Labels
-	lblList   []labels.Label
-	strings   []string
-	series    []SequenceValue
-	uint      uint64
-	float     float64
-	duration  time.Duration
+	node        Node
+	item        Item
+	matchers    []*labels.Matcher
+	matcher     *labels.Matcher
+	label       labels.Label
+	labels      labels.Labels
+	lblList     []labels.Label
+	strings     []string
+	series      []SequenceValue
+	histogram   *histogram.FloatHistogram
+	descriptors map[string]interface{}
+	bucket_set  []float64
+	int         int64
+	uint        uint64
+	float       float64
+	duration    time.Duration
 }

@@ -54,6 +60,8 @@ IDENTIFIER
 LEFT_BRACE
 LEFT_BRACKET
 LEFT_PAREN
+OPEN_HIST
+CLOSE_HIST
 METRIC_IDENTIFIER
 NUMBER
 RIGHT_BRACE
@@ -64,6 +72,20 @@ RIGHT_BRACKET
 RIGHT_PAREN
 SEMICOLON
 SPACE
 STRING
 TIMES
+// Histogram Descriptors.
+%token histogramDescStart
+%token <item>
+SUM_DESC
+COUNT_DESC
+SCHEMA_DESC
+OFFSET_DESC
+NEGATIVE_OFFSET_DESC
+BUCKETS_DESC
+NEGATIVE_BUCKETS_DESC
+ZERO_BUCKET_DESC
+ZERO_BUCKET_WIDTH_DESC
+%token histogramDescEnd
+
 // Operators.
 %token operatorsStart
 %token <item>
@@ -145,6 +167,10 @@ START_METRIC_SELECTOR
 %type