Added negative offset check for caching queries (#5719)
Signed-off-by: pawarpranav83 <[email protected]>
Co-authored-by: pawarpranav83 <[email protected]>
pawarpranav83 authored Dec 28, 2023
1 parent a1b1954 commit 53fa64d
Showing 3 changed files with 84 additions and 2 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@
* [ENHANCEMENT] Index Cache: Multi level cache adds config `max_backfill_items` to cap max items to backfill per async operation. #5686
* [ENHANCEMENT] Query Frontend: Log number of split queries in `query stats` log. #5703
* [BUGFIX] Distributor: Do not use label with empty values for sharding #5717
* [BUGFIX] Query Frontend: Do not cache results for queries with a negative offset. #5719


## 1.16.0 2023-11-20
46 changes: 44 additions & 2 deletions pkg/querier/tripperware/queryrange/results_cache.go
@@ -265,12 +265,13 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, req tripperware.R
if !s.isAtModifierCachable(ctx, req, maxCacheTime) {
return false
}
if !s.isOffsetCachable(ctx, req) {
return false
}

return true
}

var errAtModifierAfterEnd = errors.New("at modifier after end")

// isAtModifierCachable returns true if the @ modifier result
// is safe to cache.
func (s resultsCache) isAtModifierCachable(ctx context.Context, r tripperware.Request, maxCacheTime int64) bool {
@@ -280,6 +281,7 @@ func (s resultsCache) isAtModifierCachable(ctx context.Context, r tripperware.Re
// below maxCacheTime. In such cases if any tenant is intentionally
// playing with old data, we could cache empty result if we look
// beyond query end.
var errAtModifierAfterEnd = errors.New("at modifier after end")
query := r.GetQuery()
if !strings.Contains(query, "@") {
return true
@@ -321,6 +323,46 @@ func (s resultsCache) isAtModifierCachable(ctx context.Context, r tripperware.Re
return atModCachable
}

// isOffsetCachable returns true when the query has no negative offset, so the result is safe to cache,
// and false when any offset is negative, in which case the result is not cached.
func (s resultsCache) isOffsetCachable(ctx context.Context, r tripperware.Request) bool {
var errNegativeOffset = errors.New("negative offset")
query := r.GetQuery()
if !strings.Contains(query, "offset") {
return true
}
expr, err := parser.ParseExpr(query)
if err != nil {
level.Warn(util_log.WithContext(ctx, s.logger)).Log("msg", "failed to parse query, considering offset as not cachable", "query", query, "err", err)
return false
}

offsetCachable := true
parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error {
switch e := n.(type) {
case *parser.VectorSelector:
if e.OriginalOffset < 0 {
offsetCachable = false
return errNegativeOffset
}
case *parser.MatrixSelector:
offset := e.VectorSelector.(*parser.VectorSelector).OriginalOffset
if offset < 0 {
offsetCachable = false
return errNegativeOffset
}
case *parser.SubqueryExpr:
if e.OriginalOffset < 0 {
offsetCachable = false
return errNegativeOffset
}
}
return nil
})

return offsetCachable
}

func getHeaderValuesWithName(r tripperware.Response, headerName string) (headerValues []string) {
for name, hv := range r.HTTPHeaders() {
if name != headerName {
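For context, here is a minimal, self-contained sketch (not part of this commit) of the same negative-offset detection, using the Prometheus PromQL parser that the code above relies on. The helper name hasNegativeOffset and the main driver are illustrative only; the real check is resultsCache.isOffsetCachable shown in the diff.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

// hasNegativeOffset reports whether any selector or subquery in a PromQL
// expression uses a negative offset (e.g. "metric offset -10ms").
// parser.Inspect also visits the VectorSelector embedded in a MatrixSelector,
// so range-vector offsets are caught by the VectorSelector case below.
func hasNegativeOffset(query string) (bool, error) {
	expr, err := parser.ParseExpr(query)
	if err != nil {
		return false, err
	}
	negative := false
	parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error {
		switch e := n.(type) {
		case *parser.VectorSelector:
			if e.OriginalOffset < 0 {
				negative = true
			}
		case *parser.SubqueryExpr:
			if e.OriginalOffset < 0 {
				negative = true
			}
		}
		return nil
	})
	return negative, nil
}

func main() {
	for _, q := range []string{
		"metric offset 10ms",                                   // cacheable
		"rate(metric[5m] offset -10ms)",                        // not cacheable
		"sum_over_time(rate(metric[1m])[10m:1m] offset -10ms)", // not cacheable
	} {
		neg, err := hasNegativeOffset(q)
		fmt.Printf("%-55s negative offset: %v, err: %v\n", q, neg, err)
	}
}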
39 changes: 39 additions & 0 deletions pkg/querier/tripperware/queryrange/results_cache_test.go
@@ -416,6 +416,45 @@ func TestShouldCache(t *testing.T) {
input: tripperware.Response(&PrometheusResponse{}),
expected: false,
},
// offset on vector selectors.
{
name: "positive offset on vector selector",
request: &PrometheusRequest{Query: "metric offset 10ms", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: true,
},
{
name: "negative offset on vector selector",
request: &PrometheusRequest{Query: "metric offset -10ms", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: false,
},
// offset on matrix selectors.
{
name: "positive offset on matrix selector",
request: &PrometheusRequest{Query: "rate(metric[5m] offset 10ms)", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: true,
},
{
name: "negative offset on matrix selector",
request: &PrometheusRequest{Query: "rate(metric[5m] offset -10ms)", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: false,
},
// offset on subqueries.
{
name: "positive offset on subqueries",
request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset 10ms)", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: true,
},
{
name: "negative offset on subqueries",
request: &PrometheusRequest{Query: "sum_over_time(rate(metric[1m])[10m:1m] offset -10ms)", End: 125000},
input: tripperware.Response(&PrometheusResponse{}),
expected: false,
},
} {
{
t.Run(tc.name, func(t *testing.T) {
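The added cases cover the three selector types handled by isOffsetCachable. A negative offset shifts a selector forward relative to the evaluation timestamp, so the selected data can lie near or beyond the query end and still receive new samples, which is why such results are not safe to cache. Assuming a local checkout of the Cortex repository at this commit, the new cases can be exercised with the standard Go test tooling:

go test -run TestShouldCache ./pkg/querier/tripperware/queryrange/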
