Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(#2598): Support querying non-delta profile buckets via QueryRange from FrostDB #3789

Draft
wants to merge 3 commits into
base: main
Choose a base branch
from
Draft
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 17 additions & 7 deletions pkg/parcacol/querier.go
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,7 @@
ColumnPeriodSum = "sum(" + profile.ColumnPeriod + ")"
ColumnValueCount = "count(" + profile.ColumnValue + ")"
ColumnValueSum = "sum(" + profile.ColumnValue + ")"
ColumnValueFirst = "first(" + profile.ColumnValue + ")"
)

func (q *Querier) queryRangeDelta(ctx context.Context, filterExpr logicalplan.Expr, step time.Duration, sampleTypeUnit string) ([]*pb.MetricsSeries, error) {
Expand Down Expand Up @@ -531,13 +532,22 @@
Filter(filterExpr).
Aggregate(
[]logicalplan.Expr{
logicalplan.Sum(logicalplan.Col(profile.ColumnValue)),
logicalplan.Sum(logicalplan.Col(profile.ColumnValue)).Alias(ColumnValueFirst),
},
[]logicalplan.Expr{
logicalplan.DynCol(profile.ColumnLabels),
logicalplan.Col(profile.ColumnTimestamp),
},
).
Aggregate(
[]logicalplan.Expr{
logicalplan.Take(logicalplan.Col(profile.ColumnValue), 1).Alias(ColumnValueFirst),

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Test on amd64

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Build binary using goreleaser

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / docs

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Release (Dry Run)

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Analyze (go)

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Build Snap (goreleaser)

undefined: logicalplan.Take

Check failure on line 544 in pkg/parcacol/querier.go

View workflow job for this annotation

GitHub Actions / Go Lint

undefined: logicalplan.Take
},
[]logicalplan.Expr{
logicalplan.DynCol(profile.ColumnLabels),
logicalplan.Duration(1000 * time.Millisecond),
},
).
Execute(ctx, func(ctx context.Context, r arrow.Record) error {
r.Retain()
records = append(records, r)
Expand All @@ -561,7 +571,7 @@
// Add necessary columns and their found value is false by default.
columnIndices := map[string]columnIndex{
profile.ColumnTimestamp: {},
ColumnValueSum: {},
ColumnValueFirst: {},
}
labelColumnIndices := []int{}
labelSet := labels.Labels{}
Expand Down Expand Up @@ -623,17 +633,17 @@
}

ts := ar.Column(columnIndices[profile.ColumnTimestamp].index).(*array.Int64).Value(i)
value := ar.Column(columnIndices[ColumnValueSum].index).(*array.Int64).Value(i)

// value := ar.Column(columnIndices[ColumnValueFirst].index).(*array.Int64).Value(i)
valueList := ar.Column(columnIndices[ColumnValueFirst].index).(*array.List)
start, _ := valueList.ValueOffsets(i)
value := valueList.ListValues().(*array.Int64).Value(int(start))

// Each step bucket will only return one of the timestamps and its value.
// For this reason we'll take each timestamp and divide it by the step seconds.
// If we have seen a MetricsSample for this bucket before, we'll ignore this one.
// If we haven't seen one we'll add this sample to the response.

// TODO: This still queries way too much data from the underlying database.
// This needs to be moved to FrostDB to not even query all of this data in the first place.
// With a scrape interval of 10s and a query range of 1d we'd query 8640 samples and at most return 960.
// Even worse for a week, we'd query 60480 samples and only return 1000.
tsBucket := ts / 1000 / int64(step.Seconds())
if _, found := resSeriesBuckets[index][tsBucket]; found {
// We already have a MetricsSample for this timestamp bucket, ignore it.
Expand Down
Loading