fix(frontend): explain should output actual storage pk rather than `stream_key` (#16115)
kwannoel authored Apr 3, 2024
1 parent a5d4c55 commit 0f0e3e7
Showing 46 changed files with 1,144 additions and 1,584 deletions.
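
Across every file below the change is the same: the scan field previously printed as `pk` is renamed to `stream_key`, and `pk` now reports the table's actual storage primary key, unqualified (e.g. `[_row_id]` rather than `[t._row_id]`). A minimal before/after sketch, assembled from the basic_query.yaml hunks further down; the SQL mirrors the planner-test fixtures and is illustrative rather than an exact invocation:

    create table t (v1 int, v2 int);
    explain create materialized view mv as select v1, v2 from t;

    -- before: the stream key was printed under the name `pk`
    StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }

    -- after: `stream_key` and the actual storage `pk` are reported separately
    StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }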
103 changes: 44 additions & 59 deletions src/frontend/planner_test/tests/testdata/output/agg.yaml

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions src/frontend/planner_test/tests/testdata/output/append_only.yaml
@@ -7,7 +7,7 @@
└─StreamProject { exprs: [t1.v1, max(t1.v2)] }
└─StreamHashAgg [append_only] { group_key: [t1.v1], aggs: [max(t1.v2), count] }
└─StreamExchange { dist: HashShard(t1.v1) }
- └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t1._row_id], pk: [_row_id], dist: UpstreamHashShard(t1._row_id) }
- sql: |
create table t1 (v1 int, v2 int) append only;
create table t2 (v1 int, v3 int) append only;
@@ -17,17 +17,17 @@
└─StreamExchange { dist: HashShard(t1.v1, t1._row_id, t2._row_id) }
└─StreamHashJoin [append_only] { type: Inner, predicate: t1.v1 = t2.v1, output: [t1.v1, t1.v2, t2.v3, t1._row_id, t2._row_id] }
├─StreamExchange { dist: HashShard(t1.v1) }
- │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t1._row_id], pk: [_row_id], dist: UpstreamHashShard(t1._row_id) }
└─StreamExchange { dist: HashShard(t2.v1) }
- └─StreamTableScan { table: t2, columns: [t2.v1, t2.v3, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) }
+ └─StreamTableScan { table: t2, columns: [t2.v1, t2.v3, t2._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t2._row_id], pk: [_row_id], dist: UpstreamHashShard(t2._row_id) }
- sql: |
create table t1 (v1 int, v2 int) append only;
select v1 from t1 order by v1 limit 3 offset 3;
stream_plan: |-
StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [v1, t1._row_id], pk_conflict: NoCheck }
└─StreamTopN [append_only] { order: [t1.v1 ASC], limit: 3, offset: 3 }
└─StreamExchange { dist: Single }
- └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t1._row_id], pk: [_row_id], dist: UpstreamHashShard(t1._row_id) }
- sql: |
create table t1 (v1 int, v2 int) append only;
select max(v1) as max_v1 from t1;
@@ -37,4 +37,4 @@
└─StreamSimpleAgg [append_only] { aggs: [max(max(t1.v1)), count] }
└─StreamExchange { dist: Single }
└─StreamStatelessSimpleAgg { aggs: [max(t1.v1)] }
- └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t1._row_id], pk: [_row_id], dist: UpstreamHashShard(t1._row_id) }
18 changes: 9 additions & 9 deletions src/frontend/planner_test/tests/testdata/output/basic_query.yaml
@@ -18,7 +18,7 @@
└─BatchScan { table: t, columns: [t.v1, t.v2], distribution: SomeShard }
stream_plan: |-
StreamMaterialize { columns: [v1, v2, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck }
- └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
- sql: |
create table t (v1 bigint, v2 double precision);
select t2.* from t;
@@ -32,7 +32,7 @@
stream_plan: |-
StreamMaterialize { columns: [t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck }
└─StreamFilter { predicate: true }
- └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
- sql: |
create table t (v1 int);
select * from t where v1<1;
@@ -43,7 +43,7 @@
stream_plan: |-
StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck }
└─StreamFilter { predicate: (t.v1 < 1:Int32) }
- └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
- name: test boolean expression common factor extraction
sql: |
create table t (v1 Boolean, v2 Boolean, v3 Boolean);
@@ -97,7 +97,7 @@
└─BatchScan { table: t, columns: [t.v1], distribution: SomeShard }
stream_plan: |-
StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck }
- └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
- sql: select 1
batch_plan: 'BatchValues { rows: [[1:Int32]] }'
- sql: |
@@ -203,22 +203,22 @@
select a, b from mv;
stream_plan: |-
StreamMaterialize { columns: [a, b, mv.t._row_id(hidden)], stream_key: [mv.t._row_id], pk_columns: [mv.t._row_id], pk_conflict: NoCheck }
- └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) }
+ └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [mv.t._row_id], pk: [t._row_id], dist: UpstreamHashShard(mv.t._row_id) }
- sql: |
create table t (v1 int, v2 int);
create materialized view mv(a,b) as select v1+1,v2+1 from t;
select * from mv;
stream_plan: |-
StreamMaterialize { columns: [a, b, mv.t._row_id(hidden)], stream_key: [mv.t._row_id], pk_columns: [mv.t._row_id], pk_conflict: NoCheck }
- └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) }
+ └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [mv.t._row_id], pk: [t._row_id], dist: UpstreamHashShard(mv.t._row_id) }
- sql: |
create table t (id int primary key, col int);
create index idx on t(col);
select id from idx;
stream_plan: |-
StreamMaterialize { columns: [id], stream_key: [id], pk_columns: [id], pk_conflict: NoCheck }
└─StreamExchange { dist: HashShard(idx.id) }
- └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: ArrangementBackfill, pk: [idx.id], dist: SomeShard }
+ └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: ArrangementBackfill, stream_key: [idx.id], pk: [col, id], dist: SomeShard }
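
This index scan is the case where the two fields actually diverge: the stream key is [idx.id], while the index's storage primary key is [col, id], the indexed column followed by the table pk, as is typical for a secondary index. The old output printed only pk: [idx.id], hiding the storage order. A short sketch using the fixture above:

    create table t (id int primary key, col int);
    create index idx on t(col);
    -- a stream plan scanning idx now reports both keys:
    --   stream_key: [idx.id]   row identity in the changelog stream
    --   pk:         [col, id]  physical storage/sort key of the index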
- sql: |
select * from generate_series(1, 10000000, 1) where Now() is null;
batch_plan: 'BatchValues { rows: [] }'
@@ -237,7 +237,7 @@
└─StreamExchange { dist: HashShard(t.v, t._row_id, t._row_id) }
└─StreamHashJoin { type: Inner, predicate: t.v = t.v, output: [t.v, t._row_id, t._row_id] }
├─StreamExchange { dist: HashShard(t.v) }
- │ └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
└─StreamExchange { dist: HashShard(t.v) }
└─StreamFilter { predicate: false:Boolean }
- └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
16 changes: 8 additions & 8 deletions src/frontend/planner_test/tests/testdata/output/bushy_join.yaml
@@ -19,30 +19,30 @@
│ │ └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] }
│ │ ├─StreamExchange { dist: HashShard(t.id) }
│ │ │ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
│ │ └─StreamExchange { dist: HashShard(t.id) }
│ │ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
│ └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] }
│ ├─StreamExchange { dist: HashShard(t.id) }
│ │ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
│ └─StreamExchange { dist: HashShard(t.id) }
│ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
└─StreamHashJoin { type: Inner, predicate: t.id = t.id AND t.id = t.id AND t.id = t.id AND t.id = t.id, output: [t.id, t.id, t.id, t.id, t._row_id, t._row_id, t._row_id, t._row_id] }
├─StreamExchange { dist: HashShard(t.id) }
│ └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] }
│ ├─StreamExchange { dist: HashShard(t.id) }
│ │ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
│ └─StreamExchange { dist: HashShard(t.id) }
│ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
└─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] }
├─StreamExchange { dist: HashShard(t.id) }
│ └─StreamFilter { predicate: IsNotNull(t.id) }
- │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
└─StreamExchange { dist: HashShard(t.id) }
└─StreamFilter { predicate: IsNotNull(t.id) }
- └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t._row_id], pk: [_row_id], dist: UpstreamHashShard(t._row_id) }
