Put ingester disable chunk trimming behind a feature flag (#6300)
yeya24 authored Nov 1, 2024
1 parent ee87dff commit 4063773
Showing 4 changed files with 42 additions and 35 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -12,7 +12,7 @@
* [ENHANCEMENT] S3 Bucket Client: Add a list objects version configs to configure list api object version. #6280
* [ENHANCEMENT] OpenStack Swift: Add application credential configs for Openstack swift object storage backend. #6255
* [ENHANCEMENT] Query Frontend: Add new query stats metrics `cortex_query_samples_scanned_total` and `cortex_query_peak_samples` to track scannedSamples and peakSample per user. #6228
* [ENHANCEMENT] Ingester: Disable chunk trimming. #6270
* [ENHANCEMENT] Ingester: Add option `ingester.disable-chunk-trimming` to disable chunk trimming. #6300
* [ENHANCEMENT] Ingester: Add `blocks-storage.tsdb.wal-compression-type` to support zstd wal compression type. #6232
* [ENHANCEMENT] Query Frontend: Add info field to query response. #6207
* [ENHANCEMENT] Query Frontend: Add peakSample in query stats response. #6188
7 changes: 7 additions & 0 deletions docs/configuration/config-file-reference.md
@@ -3073,6 +3073,13 @@ instance_limits:
# Experimental: Enable string interning for metrics labels.
# CLI flag: -ingester.labels-string-interning-enabled
[labels_string_interning_enabled: <boolean> | default = false]
# Disable trimming of matching series chunks based on query Start and End time.
# When trimming is disabled, the result may contain samples outside the queried
# time range but select performance may be improved. Note that certain query
# results might change when this option is changed.
# CLI flag: -ingester.disable-chunk-trimming
[disable_chunk_trimming: <boolean> | default = false]
```
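
For orientation, here is a minimal, self-contained Go sketch of what chunk trimming means for a returned series (illustrative only; the `sample` type and `trimToRange` helper are not Cortex code): with trimming enabled (the default), samples are cut down to the queried window, while with `-ingester.disable-chunk-trimming=true` the ingester may return whole overlapping chunks.

```go
package main

import "fmt"

// sample is a simplified (timestamp, value) pair used only for this sketch.
type sample struct {
	ts  int64
	val float64
}

// trimToRange mimics chunk trimming: it drops samples outside the queried
// [start, end] window before the series is returned.
func trimToRange(chunk []sample, start, end int64) []sample {
	out := make([]sample, 0, len(chunk))
	for _, s := range chunk {
		if s.ts >= start && s.ts <= end {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	// One chunk covering timestamps 0..50, queried with start=15, end=35.
	chunk := []sample{{0, 1}, {10, 2}, {20, 3}, {30, 4}, {40, 5}, {50, 6}}
	start, end := int64(15), int64(35)

	// Trimming enabled: only samples inside the queried range come back.
	fmt.Println(trimToRange(chunk, start, end)) // [{20 3} {30 4}]

	// Trimming disabled: the whole overlapping chunk may come back, so callers
	// can see samples outside [start, end].
	fmt.Println(chunk)
}
```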

### `ingester_client_config`
59 changes: 26 additions & 33 deletions integration/query_fuzz_test.go
@@ -22,6 +22,7 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/require"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
@@ -37,7 +38,6 @@ import (
)

func TestDisableChunkTrimmingFuzz(t *testing.T) {
noneChunkTrimmingImage := "quay.io/cortexproject/cortex:v1.18.0"
s, err := e2e.NewScenario(networkName)
require.NoError(t, err)
defer s.Close()
@@ -47,31 +47,7 @@ func TestDisableChunkTrimmingFuzz(t *testing.T) {
consul2 := e2edb.NewConsulWithName("consul2")
require.NoError(t, s.StartAndWaitReady(consul1, consul2))

flags1 := mergeFlags(
AlertmanagerLocalFlags(),
map[string]string{
"-store.engine": blocksStorageEngine,
"-blocks-storage.backend": "filesystem",
"-blocks-storage.tsdb.head-compaction-interval": "4m",
"-blocks-storage.tsdb.block-ranges-period": "2h",
"-blocks-storage.tsdb.ship-interval": "1h",
"-blocks-storage.bucket-store.sync-interval": "15m",
"-blocks-storage.tsdb.retention-period": "2h",
"-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory,
"-blocks-storage.bucket-store.bucket-index.enabled": "true",
"-querier.query-store-for-labels-enabled": "true",
// Ingester.
"-ring.store": "consul",
"-consul.hostname": consul1.NetworkHTTPEndpoint(),
// Distributor.
"-distributor.replication-factor": "1",
// Store-gateway.
"-store-gateway.sharding-enabled": "false",
// alert manager
"-alertmanager.web.external-url": "http://localhost/alertmanager",
},
)
flags2 := mergeFlags(
flags := mergeFlags(
AlertmanagerLocalFlags(),
map[string]string{
"-store.engine": blocksStorageEngine,
@@ -85,8 +61,7 @@ func TestDisableChunkTrimmingFuzz(t *testing.T) {
"-blocks-storage.bucket-store.bucket-index.enabled": "true",
"-querier.query-store-for-labels-enabled": "true",
// Ingester.
"-ring.store": "consul",
"-consul.hostname": consul2.NetworkHTTPEndpoint(),
"-ring.store": "consul",
// Distributor.
"-distributor.replication-factor": "1",
// Store-gateway.
@@ -95,17 +70,26 @@ func TestDisableChunkTrimmingFuzz(t *testing.T) {
"-alertmanager.web.external-url": "http://localhost/alertmanager",
},
)

// make alert manager config dir
require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))

path1 := path.Join(s.SharedDir(), "cortex-1")
path2 := path.Join(s.SharedDir(), "cortex-2")

flags1 = mergeFlags(flags1, map[string]string{"-blocks-storage.filesystem.dir": path1})
flags2 = mergeFlags(flags2, map[string]string{"-blocks-storage.filesystem.dir": path2})
flags1 := mergeFlags(flags, map[string]string{
"-blocks-storage.filesystem.dir": path1,
"-consul.hostname": consul1.NetworkHTTPEndpoint(),
})
// Disable chunk trimming for Cortex 2.
flags2 := mergeFlags(flags, map[string]string{
"-blocks-storage.filesystem.dir": path2,
"-consul.hostname": consul2.NetworkHTTPEndpoint(),
"-ingester.disable-chunk-trimming": "true",
})
// Start Cortex replicas.
cortex1 := e2ecortex.NewSingleBinary("cortex-1", flags1, "")
cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags2, noneChunkTrimmingImage)
cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags2, "")
require.NoError(t, s.StartAndWaitReady(cortex1, cortex2))

// Wait until Cortex replicas have updated the ring state.
@@ -162,9 +146,18 @@ func TestDisableChunkTrimmingFuzz(t *testing.T) {
queryEnd := time.Now().Add(-time.Minute * 20)
cases := make([]*testCase, 0, 200)
testRun := 500
var (
expr parser.Expr
query string
)
for i := 0; i < testRun; i++ {
expr := ps.WalkRangeQuery()
query := expr.Pretty(0)
for {
expr = ps.WalkRangeQuery()
query = expr.Pretty(0)
if !strings.Contains(query, "timestamp") {
break
}
}
res1, err1 := c1.QueryRange(query, queryStart, queryEnd, scrapeInterval)
res2, err2 := c2.QueryRange(query, queryStart, queryEnd, scrapeInterval)
cases = append(cases, &testCase{
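
For reference, a minimal sketch of the kind of equality check the fuzz test relies on: the same generated range query is sent to both Cortex instances and the decoded results must match. The helper name `sameMatrix` is made up for this sketch; it is not the test's actual assertion code.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/prometheus/common/model"
)

// sameMatrix reports whether two range-query results carry identical series
// and samples. The idea behind the fuzz test is that, with and without chunk
// trimming on the ingester, the query engine should still produce the same
// answer for the same query and time range.
func sameMatrix(a, b model.Matrix) bool {
	return reflect.DeepEqual(a, b)
}

func main() {
	ts := model.Time(1700000000000)
	a := model.Matrix{{
		Metric: model.Metric{"__name__": "up", "job": "test"},
		Values: []model.SamplePair{{Timestamp: ts, Value: 1}},
	}}
	b := model.Matrix{{
		Metric: model.Metric{"__name__": "up", "job": "test"},
		Values: []model.SamplePair{{Timestamp: ts, Value: 1}},
	}}
	fmt.Println(sameMatrix(a, b)) // true
}
```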
9 changes: 8 additions & 1 deletion pkg/ingester/ingester.go
@@ -137,6 +137,11 @@ type Config struct {
AdminLimitMessage string `yaml:"admin_limit_message"`

LabelsStringInterningEnabled bool `yaml:"labels_string_interning_enabled"`

// DisableChunkTrimming allows disabling trimming of matching series chunks based on query Start and End time.
// When trimming is disabled, the result may contain samples outside the queried time range but Select()
// performance may be improved.
DisableChunkTrimming bool `yaml:"disable_chunk_trimming"`
}

// RegisterFlags adds the flags required to config this to the given FlagSet
@@ -163,6 +168,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.AdminLimitMessage, "ingester.admin-limit-message", "please contact administrator to raise it", "Customize the message contained in limit errors")

f.BoolVar(&cfg.LabelsStringInterningEnabled, "ingester.labels-string-interning-enabled", false, "Experimental: Enable string interning for metrics labels.")

f.BoolVar(&cfg.DisableChunkTrimming, "ingester.disable-chunk-trimming", false, "Disable trimming of matching series chunks based on query Start and End time. When trimming is disabled, the result may contain samples outside the queried time range but select performance may be improved. Note that certain query results might change when this option is changed.")
}

func (cfg *Config) Validate() error {
@@ -1985,7 +1992,7 @@ func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, th
hints := &storage.SelectHints{
Start: from,
End: through,
DisableTrimming: true,
DisableTrimming: i.cfg.DisableChunkTrimming,
}
// It's not required to return sorted series because series are sorted by the Cortex querier.
ss := q.Select(ctx, false, hints, matchers...)
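
The gated line above is the heart of the change: the `DisableTrimming` field of the Prometheus `storage.SelectHints` handed to the TSDB querier now follows the new config value instead of being hard-coded to `true`. A minimal sketch of that pattern (illustrative only, not Cortex code):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// buildHints shows how a boolean config value can control chunk trimming via
// the select hints passed to a TSDB querier.
func buildHints(from, through int64, disableChunkTrimming bool) *storage.SelectHints {
	return &storage.SelectHints{
		Start: from,
		End:   through,
		// false (the default) keeps trimming enabled, so chunks are cut to the
		// query range; true lets whole overlapping chunks through.
		DisableTrimming: disableChunkTrimming,
	}
}

func main() {
	fmt.Printf("%+v\n", buildHints(1000, 2000, true))
}
```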
