diff --git a/pkg/storage/tsdb/multilevel_cache.go b/pkg/storage/tsdb/multilevel_cache.go
index 11f5f5287a..16689efc18 100644
--- a/pkg/storage/tsdb/multilevel_cache.go
+++ b/pkg/storage/tsdb/multilevel_cache.go
@@ -2,14 +2,13 @@ package tsdb
 
 import (
 	"context"
-	"sync"
-
 	"github.com/oklog/ulid"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	storecache "github.com/thanos-io/thanos/pkg/store/cache"
+	"sync"
 )
 
 const (
@@ -21,7 +20,8 @@ const (
 type multiLevelCache struct {
 	caches []storecache.IndexCache
 
-	fetchLatency *prometheus.HistogramVec
+	fetchLatency    *prometheus.HistogramVec
+	backFillLatency *prometheus.HistogramVec
 }
 
 func (m *multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) {
@@ -66,6 +66,8 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U
 	}
 
 	defer func() {
+		backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypePostings))
+		defer backFillTimer.ObserveDuration()
 		for cache, hit := range backfillMap {
 			for _, values := range hit {
 				for l, b := range values {
@@ -104,7 +106,9 @@ func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID uli
 		}
 		if d, h := c.FetchExpandedPostings(ctx, blockID, matchers, tenant); h {
 			if i > 0 {
+				backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypeExpandedPostings))
 				m.caches[i-1].StoreExpandedPostings(blockID, matchers, d, tenant)
+				backFillTimer.ObserveDuration()
 			}
 			return d, h
 		}
@@ -156,6 +160,8 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI
 	}
 
 	defer func() {
+		backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(cacheTypeSeries))
+		defer backFillTimer.ObserveDuration()
 		for cache, hit := range backfillMap {
 			for _, values := range hit {
 				for m, b := range values {
@@ -182,5 +188,10 @@ func newMultiLevelCache(reg prometheus.Registerer, c ...storecache.IndexCache) s
 			Help:    "Histogram to track latency to fetch items from multi level index cache",
 			Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
 		}, []string{"item_type"}),
+		backFillLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+			Name:    "cortex_store_multilevel_index_cache_backfill_duration_seconds",
+			Help:    "Histogram to track latency to backfill items from multi level index cache",
+			Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
+		}, []string{"item_type"}),
 	}
 }
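
For reference, the instrumentation added above is the standard client_golang timer idiom: create a `prometheus.Timer` against the `item_type`-labelled histogram right before the backfill work and observe it when the work finishes. In `FetchMultiPostings`/`FetchMultiSeries` the timer is created inside the deferred closure, so only the backfill loop is measured; in `FetchExpandedPostings` it brackets the single `StoreExpandedPostings` call. Below is a minimal, self-contained sketch of that pattern; the `backfillPostings` helper, its sleep body, and the metric name `example_backfill_duration_seconds` are illustrative placeholders, not part of the change above.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Histogram labelled by item_type, mirroring the backFillLatency vec added in the diff.
var backFillLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "example_backfill_duration_seconds", // placeholder name, not the Cortex metric
	Help:    "Latency of backfilling items into a lower cache level.",
	Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10},
}, []string{"item_type"})

// backfillPostings stands in for the real backfill loop; only the
// NewTimer/ObserveDuration pattern is the point here.
func backfillPostings() {
	timer := prometheus.NewTimer(backFillLatency.WithLabelValues("Postings"))
	defer timer.ObserveDuration()

	time.Sleep(5 * time.Millisecond) // placeholder for the StorePostings calls
}

func main() {
	backfillPostings()
}
```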