Skip to content

Commit

Permalink
Added support for PostgreSQL v17 (#37)
Browse files Browse the repository at this point in the history
Co-authored-by: Mikhail Grigorev <[email protected]>
  • Loading branch information
CHERTS and Mikhail Grigorev authored Sep 11, 2024
1 parent 73d21d2 commit ca9803f
Show file tree
Hide file tree
Showing 6 changed files with 162 additions and 39 deletions.
106 changes: 86 additions & 20 deletions internal/collector/postgres_bgwriter.go
Original file line number Diff line number Diff line change
@@ -1,20 +1,32 @@
package collector

import (
"strconv"

"github.com/cherts/pgscv/internal/log"
"github.com/cherts/pgscv/internal/model"
"github.com/cherts/pgscv/internal/store"
"github.com/prometheus/client_golang/prometheus"
"strconv"
)

const (
postgresBgwriterQuery = "SELECT " +
postgresBgwriterQuery16 = "SELECT " +
"checkpoints_timed, checkpoints_req, checkpoint_write_time, checkpoint_sync_time, " +
"buffers_checkpoint, buffers_clean, maxwritten_clean, " +
"buffers_backend, buffers_backend_fsync, buffers_alloc, " +
"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as stats_age_seconds " +
"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as bgwr_stats_age_seconds " +
"FROM pg_stat_bgwriter"

postgresBgwriterQueryLatest = "WITH ckpt AS (" +
"SELECT num_timed AS checkpoints_timed, num_requested AS checkpoints_req, restartpoints_timed, restartpoints_req, " +
"restartpoints_done, write_time AS checkpoint_write_time, sync_time AS checkpoint_sync_time, buffers_written AS buffers_checkpoint, " +
"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as ckpt_stats_age_seconds FROM pg_stat_checkpointer), " +
"bgwr AS (" +
"SELECT buffers_clean, maxwritten_clean, buffers_alloc, " +
"coalesce(extract('epoch' from age(now(), stats_reset)), 0) as bgwr_stats_age_seconds FROM pg_stat_bgwriter), " +
"stat_io AS (" +
"SELECT sum(writes) AS buffers_backend, sum(fsyncs) AS buffers_backend_fsync FROM pg_stat_io WHERE backend_type='background writer') " +
"SELECT ckpt.*, bgwr.*, stat_io.* FROM ckpt, bgwr, stat_io"
)

type postgresBgwriterCollector struct {
Expand Down Expand Up @@ -74,12 +86,36 @@ func NewPostgresBgwriterCollector(constLabels labels, settings model.CollectorSe
nil, constLabels,
settings.Filters,
),
"stats_age_seconds": newBuiltinTypedDesc(
"bgwr_stats_age_seconds": newBuiltinTypedDesc(
descOpts{"postgres", "bgwriter", "stats_age_seconds_total", "The age of the background writer activity statistics, in seconds.", 0},
prometheus.CounterValue,
nil, constLabels,
settings.Filters,
),
"ckpt_stats_age_seconds": newBuiltinTypedDesc(
descOpts{"postgres", "checkpoints", "stats_age_seconds_total", "The age of the checkpointer activity statistics, in seconds (since v17).", 0},
prometheus.CounterValue,
nil, constLabels,
settings.Filters,
),
"checkpoint_restartpointstimed": newBuiltinTypedDesc(
descOpts{"postgres", "checkpoints", "restartpoints_timed", "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it (since v17).", 0},
prometheus.CounterValue,
nil, constLabels,
settings.Filters,
),
"checkpoint_restartpointsreq": newBuiltinTypedDesc(
descOpts{"postgres", "checkpoints", "restartpoints_req", "Number of requested restartpoints (since v17).", 0},
prometheus.CounterValue,
nil, constLabels,
settings.Filters,
),
"checkpoint_restartpointsdone": newBuiltinTypedDesc(
descOpts{"postgres", "checkpoints", "restartpoints_done", "Number of restartpoints that have been performed (since v17).", 0},
prometheus.CounterValue,
nil, constLabels,
settings.Filters,
),
},
}, nil
}
Expand All @@ -92,7 +128,7 @@ func (c *postgresBgwriterCollector) Update(config Config, ch chan<- prometheus.M
}
defer conn.Close()

res, err := conn.Query(postgresBgwriterQuery)
res, err := conn.Query(selectBgwriterQuery(config.serverVersionNum))
if err != nil {
return err
}
Expand Down Expand Up @@ -122,8 +158,16 @@ func (c *postgresBgwriterCollector) Update(config Config, ch chan<- prometheus.M
ch <- desc.newConstMetric(stats.backendFsync)
case "alloc_bytes":
ch <- desc.newConstMetric(stats.backendAllocated * blockSize)
case "stats_age_seconds":
ch <- desc.newConstMetric(stats.statsAgeSeconds)
case "bgwr_stats_age_seconds":
ch <- desc.newConstMetric(stats.bgwrStatsAgeSeconds)
case "ckpt_stats_age_seconds":
ch <- desc.newConstMetric(stats.ckptStatsAgeSeconds)
case "checkpoint_restartpointstimed":
ch <- desc.newConstMetric(stats.ckptRestartpointsTimed)
case "checkpoint_restartpointsreq":
ch <- desc.newConstMetric(stats.ckptRestartpointsReq)
case "checkpoint_restartpointsdone":
ch <- desc.newConstMetric(stats.ckptRestartpointsDone)
default:
log.Debugf("unknown desc name: %s, skip", name)
continue
Expand All @@ -135,17 +179,21 @@ func (c *postgresBgwriterCollector) Update(config Config, ch chan<- prometheus.M

// postgresBgwriterStat describes stats related to Postgres background writer
// and checkpointer activity. Fields mirror the column aliases produced by
// postgresBgwriterQuery16 / postgresBgwriterQueryLatest; the restartpoint and
// ckptStatsAgeSeconds fields are only populated on PostgreSQL 17+.
type postgresBgwriterStat struct {
	ckptTimed              float64 // checkpoints_timed
	ckptReq                float64 // checkpoints_req
	ckptWriteTime          float64 // checkpoint_write_time
	ckptSyncTime           float64 // checkpoint_sync_time
	ckptBuffers            float64 // buffers_checkpoint
	ckptRestartpointsTimed float64 // restartpoints_timed (since v17)
	ckptRestartpointsReq   float64 // restartpoints_req (since v17)
	ckptRestartpointsDone  float64 // restartpoints_done (since v17)
	ckptStatsAgeSeconds    float64 // ckpt_stats_age_seconds (since v17)
	bgwrBuffers            float64 // buffers_clean
	bgwrMaxWritten         float64 // maxwritten_clean
	backendBuffers         float64 // buffers_backend
	backendFsync           float64 // buffers_backend_fsync
	backendAllocated       float64 // buffers_alloc
	bgwrStatsAgeSeconds    float64 // bgwr_stats_age_seconds
}

// parsePostgresBgwriterStats parses PGResult and returns struct with data values
Expand Down Expand Up @@ -190,8 +238,16 @@ func parsePostgresBgwriterStats(r *model.PGResult) postgresBgwriterStat {
stats.backendFsync = v
case "buffers_alloc":
stats.backendAllocated = v
case "stats_age_seconds":
stats.statsAgeSeconds = v
case "bgwr_stats_age_seconds":
stats.bgwrStatsAgeSeconds = v
case "ckpt_stats_age_seconds":
stats.ckptStatsAgeSeconds = v
case "restartpoints_timed":
stats.ckptRestartpointsTimed = v
case "restartpoints_req":
stats.ckptRestartpointsReq = v
case "restartpoints_done":
stats.ckptRestartpointsDone = v
default:
continue
}
Expand All @@ -200,3 +256,13 @@ func parsePostgresBgwriterStats(r *model.PGResult) postgresBgwriterStat {

return stats
}

// selectBgwriterQuery returns the bgwriter/checkpointer statistics query
// appropriate for the given Postgres server version: versions below 17 use
// the legacy pg_stat_bgwriter-only query, 17+ use the split-view query.
func selectBgwriterQuery(version int) string {
	if version < PostgresV17 {
		return postgresBgwriterQuery16
	}
	return postgresBgwriterQueryLatest
}
20 changes: 14 additions & 6 deletions internal/collector/postgres_bgwriter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package collector

import (
"database/sql"
"github.com/jackc/pgproto3/v2"
"testing"

"github.com/cherts/pgscv/internal/model"
"github.com/jackc/pgproto3/v2"
"github.com/stretchr/testify/assert"
"testing"
)

func TestPostgresBgwriterCollector_Update(t *testing.T) {
Expand All @@ -20,6 +21,10 @@ func TestPostgresBgwriterCollector_Update(t *testing.T) {
"postgres_backends_fsync_total",
"postgres_backends_allocated_bytes_total",
"postgres_bgwriter_stats_age_seconds_total",
"postgres_checkpoints_stats_age_seconds_total",
"postgres_checkpoints_restartpoints_req",
"postgres_checkpoints_restartpoints_done",
"postgres_checkpoints_restartpoints_timed",
},
collector: NewPostgresBgwriterCollector,
service: model.ServiceTypePostgresql,
Expand All @@ -38,27 +43,30 @@ func Test_parsePostgresBgwriterStats(t *testing.T) {
name: "normal output",
res: &model.PGResult{
Nrows: 1,
Ncols: 11,
Ncols: 15,
Colnames: []pgproto3.FieldDescription{
{Name: []byte("checkpoints_timed")}, {Name: []byte("checkpoints_req")},
{Name: []byte("checkpoint_write_time")}, {Name: []byte("checkpoint_sync_time")},
{Name: []byte("buffers_checkpoint")}, {Name: []byte("buffers_clean")}, {Name: []byte("maxwritten_clean")},
{Name: []byte("buffers_backend")}, {Name: []byte("buffers_backend_fsync")}, {Name: []byte("buffers_alloc")},
{Name: []byte("stats_age_seconds")},
{Name: []byte("bgwr_stats_age_seconds")}, {Name: []byte("ckpt_stats_age_seconds")}, {Name: []byte("restartpoints_timed")},
{Name: []byte("restartpoints_req")}, {Name: []byte("restartpoints_done")},
},
Rows: [][]sql.NullString{
{
{String: "55", Valid: true}, {String: "17", Valid: true},
{String: "548425", Valid: true}, {String: "5425", Valid: true},
{String: "5482", Valid: true}, {String: "7584", Valid: true}, {String: "452", Valid: true},
{String: "6895", Valid: true}, {String: "2", Valid: true}, {String: "48752", Valid: true},
{String: "5488", Valid: true},
{String: "5488", Valid: true}, {String: "54388", Valid: true}, {String: "47352", Valid: true},
{String: "5288", Valid: true}, {String: "1438", Valid: true},
},
},
},
want: postgresBgwriterStat{
ckptTimed: 55, ckptReq: 17, ckptWriteTime: 548425, ckptSyncTime: 5425, ckptBuffers: 5482, bgwrBuffers: 7584, bgwrMaxWritten: 452,
backendBuffers: 6895, backendFsync: 2, backendAllocated: 48752, statsAgeSeconds: 5488,
backendBuffers: 6895, backendFsync: 2, backendAllocated: 48752, bgwrStatsAgeSeconds: 5488, ckptStatsAgeSeconds: 54388, ckptRestartpointsTimed: 47352,
ckptRestartpointsReq: 5288, ckptRestartpointsDone: 1438,
},
},
}
Expand Down
6 changes: 4 additions & 2 deletions internal/collector/postgres_common.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@ package collector

import (
"context"
"strconv"
"strings"

"github.com/cherts/pgscv/internal/log"
"github.com/cherts/pgscv/internal/model"
"github.com/cherts/pgscv/internal/store"
"strconv"
"strings"
)

const (
Expand All @@ -20,6 +21,7 @@ const (
PostgresV14 = 140000
PostgresV15 = 150000
PostgresV16 = 160000
PostgresV17 = 170000

// Minimal required version is 9.5.
PostgresVMinNum = PostgresV95
Expand Down
8 changes: 4 additions & 4 deletions internal/collector/postgres_replication_slots.go
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
package collector

import (
"strconv"
"strings"

"github.com/cherts/pgscv/internal/log"
"github.com/cherts/pgscv/internal/model"
"github.com/cherts/pgscv/internal/store"
"github.com/prometheus/client_golang/prometheus"
"strconv"
"strings"
)

const (
Expand All @@ -17,7 +18,6 @@ const (
postgresReplicationSlotQueryLatest = "SELECT database, slot_name, slot_type, active, pg_current_wal_lsn() - restart_lsn AS since_restart_bytes FROM pg_replication_slots"
)

//
type postgresReplicationSlotCollector struct {
restart typedDesc
}
Expand Down Expand Up @@ -48,7 +48,7 @@ func (c *postgresReplicationSlotCollector) Update(config Config, ch chan<- prome
return err
}

// parse pg_stat_statements stats
// parse pg_replication_slots stats
stats := parsePostgresReplicationSlotStats(res, c.restart.labelNames)

for _, stat := range stats {
Expand Down
55 changes: 50 additions & 5 deletions internal/collector/postgres_statements.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ const (

// postgresStatementsQueryLatest defines query for querying statements metrics.
// 1. use nullif(value, 0) to nullify zero values; NULLs are skipped by the stats method and metrics will not be generated.
postgresStatementsQueryLatest = "SELECT d.datname AS database, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
postgresStatementsQuery16 = "SELECT d.datname AS database, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
"coalesce(%s, '') AS query, p.calls, p.rows, p.total_exec_time, p.total_plan_time, p.blk_read_time, p.blk_write_time, " +
"nullif(p.shared_blks_hit, 0) AS shared_blks_hit, nullif(p.shared_blks_read, 0) AS shared_blks_read, " +
"nullif(p.shared_blks_dirtied, 0) AS shared_blks_dirtied, nullif(p.shared_blks_written, 0) AS shared_blks_written, " +
Expand All @@ -61,7 +61,7 @@ const (
"nullif(p.wal_records, 0) AS wal_records, nullif(p.wal_fpi, 0) AS wal_fpi, nullif(p.wal_bytes, 0) AS wal_bytes " +
"FROM %s.pg_stat_statements p JOIN pg_database d ON d.oid=p.dbid"

postgresStatementsQueryLatestTopK = "WITH stat AS (SELECT d.datname AS DATABASE, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
postgresStatementsQuery16TopK = "WITH stat AS (SELECT d.datname AS DATABASE, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
"COALESCE(%s, '') AS query, p.calls, p.rows, p.total_exec_time, p.total_plan_time, p.blk_read_time, p.blk_write_time, " +
"NULLIF(p.shared_blks_hit, 0) AS shared_blks_hit, NULLIF(p.shared_blks_read, 0) AS shared_blks_read, " +
"NULLIF(p.shared_blks_dirtied, 0) AS shared_blks_dirtied, NULLIF(p.shared_blks_written, 0) AS shared_blks_written, " +
Expand Down Expand Up @@ -89,6 +89,47 @@ const (
"NULLIF(sum(COALESCE(local_blks_dirtied, 0)), 0), NULLIF(sum(COALESCE(local_blks_written, 0)), 0), NULLIF(sum(COALESCE(temp_blks_read, 0)), 0), " +
"NULLIF(sum(COALESCE(temp_blks_written, 0)), 0), NULLIF(sum(COALESCE(wal_records, 0)), 0), NULLIF(sum(COALESCE(wal_fpi, 0)), 0), " +
"NULLIF(sum(COALESCE(wal_bytes, 0)), 0) FROM stat WHERE NOT visible GROUP BY DATABASE HAVING EXISTS (SELECT 1 FROM stat WHERE NOT visible)"

// postgresStatementsQueryLatest defines query for querying statements metrics.
// 1. use nullif(value, 0) to nullify zero values; NULLs are skipped by the stats method and metrics will not be generated.
postgresStatementsQueryLatest = "SELECT d.datname AS database, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
"coalesce(%s, '') AS query, p.calls, p.rows, p.total_exec_time, p.total_plan_time, p.shared_blk_read_time AS blk_read_time, " +
"p.shared_blk_write_time AS blk_write_time, nullif(p.shared_blks_hit, 0) AS shared_blks_hit, nullif(p.shared_blks_read, 0) AS shared_blks_read, " +
"nullif(p.shared_blks_dirtied, 0) AS shared_blks_dirtied, nullif(p.shared_blks_written, 0) AS shared_blks_written, " +
"nullif(p.local_blks_hit, 0) AS local_blks_hit, nullif(p.local_blks_read, 0) AS local_blks_read, " +
"nullif(p.local_blks_dirtied, 0) AS local_blks_dirtied, nullif(p.local_blks_written, 0) AS local_blks_written, " +
"nullif(p.temp_blks_read, 0) AS temp_blks_read, nullif(p.temp_blks_written, 0) AS temp_blks_written, " +
"nullif(p.wal_records, 0) AS wal_records, nullif(p.wal_fpi, 0) AS wal_fpi, nullif(p.wal_bytes, 0) AS wal_bytes " +
"FROM %s.pg_stat_statements p JOIN pg_database d ON d.oid=p.dbid"

postgresStatementsQueryLatestTopK = "WITH stat AS (SELECT d.datname AS DATABASE, pg_get_userbyid(p.userid) AS \"user\", p.queryid, " +
"COALESCE(%s, '') AS query, p.calls, p.rows, p.total_exec_time, p.total_plan_time, p.shared_blk_read_time AS blk_read_time, " +
"p.shared_blk_write_time AS blk_write_time, NULLIF(p.shared_blks_hit, 0) AS shared_blks_hit, NULLIF(p.shared_blks_read, 0) AS shared_blks_read, " +
"NULLIF(p.shared_blks_dirtied, 0) AS shared_blks_dirtied, NULLIF(p.shared_blks_written, 0) AS shared_blks_written, " +
"NULLIF(p.local_blks_hit, 0) AS local_blks_hit, NULLIF(p.local_blks_read, 0) AS local_blks_read, " +
"NULLIF(p.local_blks_dirtied, 0) AS local_blks_dirtied, NULLIF(p.local_blks_written, 0) AS local_blks_written, " +
"NULLIF(p.temp_blks_read, 0) AS temp_blks_read, NULLIF(p.temp_blks_written, 0) AS temp_blks_written, " +
"NULLIF(p.wal_records, 0) AS wal_records, NULLIF(p.wal_fpi, 0) AS wal_fpi, NULLIF(p.wal_bytes, 0) AS wal_bytes, " +
"(ROW_NUMBER() OVER ( ORDER BY p.calls DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.rows DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.total_exec_time DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.total_plan_time DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.shared_blk_read_time DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.shared_blk_write_time DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.shared_blks_hit DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.shared_blks_read DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.shared_blks_dirtied DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.shared_blks_written DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.local_blks_hit DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.local_blks_read DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.local_blks_dirtied DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.local_blks_written DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.temp_blks_read DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.temp_blks_written DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.wal_records DESC NULLS LAST) < $1) OR (ROW_NUMBER() OVER ( ORDER BY p.wal_fpi DESC NULLS LAST) < $1) OR " +
"(ROW_NUMBER() OVER ( ORDER BY p.wal_bytes DESC NULLS LAST) < $1) AS visible FROM %s.pg_stat_statements p JOIN pg_database d ON d.oid = p.dbid) " +
"SELECT DATABASE, \"user\", queryid, query, calls, rows, total_exec_time, total_plan_time, blk_read_time, blk_write_time, shared_blks_hit, " +
"shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, " +
"temp_blks_read, temp_blks_written, wal_records, wal_fpi, wal_bytes FROM stat WHERE visible UNION ALL SELECT DATABASE, 'all_users', NULL, " +
"'all_queries', NULLIF(sum(COALESCE(calls, 0)), 0), NULLIF(sum(COALESCE(ROWS, 0)), 0), NULLIF(sum(COALESCE(total_exec_time, 0)), 0), " +
"NULLIF(sum(COALESCE(total_plan_time, 0)), 0), NULLIF(sum(COALESCE(blk_read_time, 0)), 0), NULLIF(sum(COALESCE(blk_write_time, 0)), 0), " +
"NULLIF(sum(COALESCE(shared_blks_hit, 0)), 0), NULLIF(sum(COALESCE(shared_blks_read, 0)), 0), NULLIF(sum(COALESCE(shared_blks_dirtied, 0)), 0), " +
"NULLIF(sum(COALESCE(shared_blks_written, 0)), 0), NULLIF(sum(COALESCE(local_blks_hit, 0)), 0), NULLIF(sum(COALESCE(local_blks_read, 0)), 0), " +
"NULLIF(sum(COALESCE(local_blks_dirtied, 0)), 0), NULLIF(sum(COALESCE(local_blks_written, 0)), 0), NULLIF(sum(COALESCE(temp_blks_read, 0)), 0), " +
"NULLIF(sum(COALESCE(temp_blks_written, 0)), 0), NULLIF(sum(COALESCE(wal_records, 0)), 0), NULLIF(sum(COALESCE(wal_fpi, 0)), 0), " +
"NULLIF(sum(COALESCE(wal_bytes, 0)), 0) FROM stat WHERE NOT visible GROUP BY DATABASE HAVING EXISTS (SELECT 1 FROM stat WHERE NOT visible)"
)

// postgresStatementsCollector ...
Expand Down Expand Up @@ -486,13 +527,17 @@ func selectStatementsQuery(version int, schema string, notrackmode bool, topK in
} else {
query_columm = "p.query"
}
switch {
case version < PostgresV13:
if version < PostgresV13 {
if topK > 0 {
return fmt.Sprintf(postgresStatementsQuery12TopK, query_columm, schema)
}
return fmt.Sprintf(postgresStatementsQuery12, query_columm, schema)
default:
} else if version > PostgresV12 && version < PostgresV17 {
if topK > 0 {
return fmt.Sprintf(postgresStatementsQuery16TopK, query_columm, schema)
}
return fmt.Sprintf(postgresStatementsQuery16, query_columm, schema)
} else {
if topK > 0 {
return fmt.Sprintf(postgresStatementsQueryLatestTopK, query_columm, schema)
}
Expand Down
6 changes: 4 additions & 2 deletions internal/collector/postgres_statements_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,8 +170,10 @@ func Test_selectStatementsQuery(t *testing.T) {
}{
{version: PostgresV12, want: fmt.Sprintf(postgresStatementsQuery12, "p.query", "example"), topK: 0},
{version: PostgresV12, want: fmt.Sprintf(postgresStatementsQuery12TopK, "p.query", "example"), topK: 100},
{version: PostgresV13, want: fmt.Sprintf(postgresStatementsQueryLatest, "p.query", "example"), topK: 0},
{version: PostgresV13, want: fmt.Sprintf(postgresStatementsQueryLatestTopK, "p.query", "example"), topK: 100},
{version: PostgresV13, want: fmt.Sprintf(postgresStatementsQuery16, "p.query", "example"), topK: 0},
{version: PostgresV13, want: fmt.Sprintf(postgresStatementsQuery16TopK, "p.query", "example"), topK: 100},
{version: PostgresV17, want: fmt.Sprintf(postgresStatementsQueryLatest, "p.query", "example"), topK: 0},
{version: PostgresV17, want: fmt.Sprintf(postgresStatementsQueryLatestTopK, "p.query", "example"), topK: 100},
}

for _, tc := range testcases {
Expand Down

0 comments on commit ca9803f

Please sign in to comment.