From 3a78bd57cb33127567c2783cbf2109892e5eb8c0 Mon Sep 17 00:00:00 2001 From: Andrey Koshchiy Date: Mon, 4 Dec 2023 11:03:08 +0400 Subject: [PATCH 01/16] feat: add json_path_match & @?,@@ operators (#13906) --- Cargo.lock | 20 +- Cargo.toml | 2 +- src/query/ast/src/ast/expr.rs | 12 + src/query/ast/src/parser/expr.rs | 2 + src/query/ast/src/parser/token.rs | 6 + .../ast/tests/it/testdata/expr-error.txt | 2 +- .../ast/tests/it/testdata/statement-error.txt | 2 +- src/query/functions/src/scalars/variant.rs | 168 ++++++++-- .../it/scalars/testdata/function_list.txt | 4 +- .../tests/it/scalars/testdata/variant.txt | 309 +++++++++++++++++- .../functions/tests/it/scalars/variant.rs | 141 ++++++++ .../02_function/02_0065_function_json.test | 152 +++++++++ 12 files changed, 770 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4f61724ae71..ab60f2a1db94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2006,7 +2006,7 @@ dependencies = [ "goldenfile", "hex", "itertools 0.10.5", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "lexical-core", "log", "match-template", @@ -2040,7 +2040,7 @@ dependencies = [ "common-io", "common-meta-app", "common-settings", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "lexical-core", "match-template", "micromarshal", @@ -2087,7 +2087,7 @@ dependencies = [ "h3o", "hex", "itertools 0.10.5", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "lexical-core", "libm", "match-template", @@ -2571,7 +2571,7 @@ dependencies = [ "common-expression", "common-pipeline-core", "common-profile", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "match-template", ] @@ -2806,7 +2806,7 @@ dependencies = [ "futures-util", "indexmap 2.0.0", "itertools 0.10.5", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "log", "metrics", "minitrace", @@ -3128,7 +3128,7 @@ dependencies = [ "common-users", "futures", "itertools 0.10.5", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "log", "once_cell", "opendal", @@ -4015,7 +4015,7 @@ dependencies = [ "humantime", "indicatif", "itertools 0.10.5", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "jwt-simple", "lazy_static", "log", @@ -4135,7 +4135,7 @@ dependencies = [ "databend-sql", "ethnum", "itertools 0.11.0", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "rand 0.8.5", "roaring", "tokio", @@ -4519,7 +4519,7 @@ dependencies = [ "databend-query", "futures", "futures-util", - "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9)", + "jsonb 0.3.0 (git+https://github.com/datafuselabs/jsonb?rev=582c139)", "jwt-simple", "log", "opendal", @@ -7318,7 +7318,7 @@ dependencies = [ [[package]] name = "jsonb" version = "0.3.0" -source = "git+https://github.com/datafuselabs/jsonb?rev=1d7a3e9#1d7a3e94e45271086510ed4c8b71acc5bf2104bc" +source = 
"git+https://github.com/datafuselabs/jsonb?rev=582c139#582c139d884248bcb357e995d8ea4501fc264e60" dependencies = [ "byteorder", "fast-float", diff --git a/Cargo.toml b/Cargo.toml index c87130c49cd0..b0a61ba2a1ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,7 +119,7 @@ opendal = { version = "0.42", features = [ ] } ethnum = { version = "1.3.2" } ordered-float = { version = "3.6.0", default-features = false } -jsonb = { git = "https://github.com/datafuselabs/jsonb", rev = "1d7a3e9" } +jsonb = { git = "https://github.com/datafuselabs/jsonb", rev = "582c139" } # openraft = { version = "0.8.2", features = ["compat-07"] } # For debugging diff --git a/src/query/ast/src/ast/expr.rs b/src/query/ast/src/ast/expr.rs index 3f41c4de80c5..dcf1d9e7f12a 100644 --- a/src/query/ast/src/ast/expr.rs +++ b/src/query/ast/src/ast/expr.rs @@ -493,6 +493,10 @@ pub enum JsonOperator { AtArrow, /// <@ Checks whether right json contains the left json ArrowAt, + /// @? Checks whether JSON path return any item for the specified JSON value + AtQuestion, + /// @@ Returns the result of a JSON path predicate check for the specified JSON value. + AtAt, } impl JsonOperator { @@ -507,6 +511,8 @@ impl JsonOperator { JsonOperator::QuestionAnd => "json_exists_all_keys".to_string(), JsonOperator::AtArrow => "json_contains_in_left".to_string(), JsonOperator::ArrowAt => "json_contains_in_right".to_string(), + JsonOperator::AtQuestion => "json_path_exists".to_string(), + JsonOperator::AtAt => "json_path_match".to_string(), } } } @@ -777,6 +783,12 @@ impl Display for JsonOperator { JsonOperator::ArrowAt => { write!(f, "<@") } + JsonOperator::AtQuestion => { + write!(f, "@?") + } + JsonOperator::AtAt => { + write!(f, "@@") + } } } } diff --git a/src/query/ast/src/parser/expr.rs b/src/query/ast/src/parser/expr.rs index 03350b09a385..d227aa610c03 100644 --- a/src/query/ast/src/parser/expr.rs +++ b/src/query/ast/src/parser/expr.rs @@ -1338,6 +1338,8 @@ pub fn json_op(i: Input) -> IResult { value(JsonOperator::QuestionAnd, rule! { "?&" }), value(JsonOperator::AtArrow, rule! { "@>" }), value(JsonOperator::ArrowAt, rule! { "<@" }), + value(JsonOperator::AtQuestion, rule! { "@?" }), + value(JsonOperator::AtAt, rule! { "@@" }), ))(i) } diff --git a/src/query/ast/src/parser/token.rs b/src/query/ast/src/parser/token.rs index 25cb2a5ada94..aed2e06e2385 100644 --- a/src/query/ast/src/parser/token.rs +++ b/src/query/ast/src/parser/token.rs @@ -298,6 +298,12 @@ pub enum TokenKind { /// Used as JSON operator. #[token("@>")] AtArrow, + /// Used as JSON operator. + #[token("@?")] + AtQuestion, + /// Used as JSON operator. + #[token("@@")] + AtAt, // Keywords // diff --git a/src/query/ast/tests/it/testdata/expr-error.txt b/src/query/ast/tests/it/testdata/expr-error.txt index a4c1e4400e15..0d40357b074e 100644 --- a/src/query/ast/tests/it/testdata/expr-error.txt +++ b/src/query/ast/tests/it/testdata/expr-error.txt @@ -53,7 +53,7 @@ error: --> SQL:1:10 | 1 | CAST(col1) - | ---- ^ unexpected `)`, expecting `AS`, `,`, `(`, `IS`, `NOT`, `IN`, `EXISTS`, `BETWEEN`, `+`, `-`, `*`, `/`, `//`, `DIV`, `%`, `||`, `<->`, `>`, `<`, `>=`, `<=`, `=`, `<>`, `!=`, `^`, `AND`, `OR`, `XOR`, `LIKE`, `REGEXP`, `RLIKE`, `SOUNDS`, , , , , , `->`, `->>`, `#>`, `#>>`, `?`, `?|`, `?&`, `@>`, `<@`, , , , , , `CAST`, `TRY_CAST`, `DATE_ADD`, `DATE_SUB`, `DATE_TRUNC`, `DATE`, `TIMESTAMP`, `INTERVAL`, `::`, or 26 more ... 
+ | ---- ^ unexpected `)`, expecting `AS`, `,`, `(`, `IS`, `NOT`, `IN`, `EXISTS`, `BETWEEN`, `+`, `-`, `*`, `/`, `//`, `DIV`, `%`, `||`, `<->`, `>`, `<`, `>=`, `<=`, `=`, `<>`, `!=`, `^`, `AND`, `OR`, `XOR`, `LIKE`, `REGEXP`, `RLIKE`, `SOUNDS`, , , , , , `->`, `->>`, `#>`, `#>>`, `?`, `?|`, `?&`, `@>`, `<@`, `@?`, `@@`, , , , , , `CAST`, `TRY_CAST`, `DATE_ADD`, `DATE_SUB`, `DATE_TRUNC`, `DATE`, `TIMESTAMP`, or 28 more ... | | | while parsing `CAST(... AS ...)` | while parsing expression diff --git a/src/query/ast/tests/it/testdata/statement-error.txt b/src/query/ast/tests/it/testdata/statement-error.txt index 87f6bb7dd94e..78abfdf75bea 100644 --- a/src/query/ast/tests/it/testdata/statement-error.txt +++ b/src/query/ast/tests/it/testdata/statement-error.txt @@ -415,7 +415,7 @@ error: --> SQL:1:41 | 1 | SELECT * FROM t GROUP BY GROUPING SETS () - | ------ ^ unexpected `)`, expecting `(`, `IS`, `IN`, `EXISTS`, `BETWEEN`, `+`, `-`, `*`, `/`, `//`, `DIV`, `%`, `||`, `<->`, `>`, `<`, `>=`, `<=`, `=`, `<>`, `!=`, `^`, `AND`, `OR`, `XOR`, `LIKE`, `NOT`, `REGEXP`, `RLIKE`, `SOUNDS`, , , , , , `->`, `->>`, `#>`, `#>>`, `?`, `?|`, `?&`, `@>`, `<@`, , , , , , `CAST`, `TRY_CAST`, `DATE_ADD`, `DATE_SUB`, `DATE_TRUNC`, `DATE`, `TIMESTAMP`, `INTERVAL`, `::`, `EXTRACT`, `DATE_PART`, or 24 more ... + | ------ ^ unexpected `)`, expecting `(`, `IS`, `IN`, `EXISTS`, `BETWEEN`, `+`, `-`, `*`, `/`, `//`, `DIV`, `%`, `||`, `<->`, `>`, `<`, `>=`, `<=`, `=`, `<>`, `!=`, `^`, `AND`, `OR`, `XOR`, `LIKE`, `NOT`, `REGEXP`, `RLIKE`, `SOUNDS`, , , , , , `->`, `->>`, `#>`, `#>>`, `?`, `?|`, `?&`, `@>`, `<@`, `@?`, `@@`, , , , , , `CAST`, `TRY_CAST`, `DATE_ADD`, `DATE_SUB`, `DATE_TRUNC`, `DATE`, `TIMESTAMP`, `INTERVAL`, `::`, or 26 more ... | | | while parsing `SELECT ...` diff --git a/src/query/functions/src/scalars/variant.rs b/src/query/functions/src/scalars/variant.rs index 69a6aab296ad..9bd38003448e 100644 --- a/src/query/functions/src/scalars/variant.rs +++ b/src/query/functions/src/scalars/variant.rs @@ -78,10 +78,12 @@ use jsonb::get_by_path_first; use jsonb::is_array; use jsonb::is_object; use jsonb::jsonpath::parse_json_path; +use jsonb::jsonpath::JsonPath; use jsonb::keypath::parse_key_paths; use jsonb::object_keys; use jsonb::parse_value; use jsonb::path_exists; +use jsonb::path_match; use jsonb::strip_nulls; use jsonb::to_bool; use jsonb::to_f64; @@ -513,33 +515,53 @@ pub fn register(registry: &mut FunctionRegistry) { ), ); - registry.register_passthrough_nullable_2_arg::( - "json_path_exists", - |_, _, _| FunctionDomain::MayThrow, - vectorize_with_builder_2_arg::( - |val, path, output, ctx| { - if let Some(validity) = &ctx.validity { - if !validity.get_bit(output.len()) { - output.push(false); - return; - } - } - match parse_json_path(path) { - Ok(json_path) => { - let res = path_exists(val, json_path); - output.push(res); - } - Err(_) => { - ctx.set_error( - output.len(), - format!("Invalid JSON Path '{}'", &String::from_utf8_lossy(path),), - ); - output.push(false); - } - } + registry.register_function_factory("json_path_match", |_, args_type| { + if args_type.len() != 2 { + return None; + } + if (args_type[0].remove_nullable() != DataType::Variant && args_type[0] != DataType::Null) + || (args_type[1].remove_nullable() != DataType::String + && args_type[1] != DataType::Null) + { + return None; + } + Some(Arc::new(Function { + signature: FunctionSignature { + name: "json_path_match".to_string(), + args_type: args_type.to_vec(), + return_type: DataType::Nullable(Box::new(DataType::Boolean)), }, - ), - ); + 
eval: FunctionEval::Scalar { + calc_domain: Box::new(|_, _| FunctionDomain::MayThrow), + eval: Box::new(|args, ctx| path_predicate_fn(args, ctx, path_match)), + }, + })) + }); + + registry.register_function_factory("json_path_exists", |_, args_type| { + if args_type.len() != 2 { + return None; + } + if (args_type[0].remove_nullable() != DataType::Variant && args_type[0] != DataType::Null) + || (args_type[1].remove_nullable() != DataType::String + && args_type[1] != DataType::Null) + { + return None; + } + Some(Arc::new(Function { + signature: FunctionSignature { + name: "json_path_exists".to_string(), + args_type: args_type.to_vec(), + return_type: DataType::Nullable(Box::new(DataType::Boolean)), + }, + eval: FunctionEval::Scalar { + calc_domain: Box::new(|_, _| FunctionDomain::Full), + eval: Box::new(|args, ctx| { + path_predicate_fn(args, ctx, |json, path| Ok(path_exists(json, path))) + }), + }, + })) + }); registry.register_combine_nullable_2_arg::( "get_path", @@ -1470,3 +1492,97 @@ fn get_by_keypath_fn( } } } + +fn path_predicate_fn<'a, P>( + args: &'a [ValueRef], + ctx: &'a mut EvalContext, + predicate: P, +) -> Value +where + P: Fn(&'a [u8], JsonPath<'a>) -> Result, +{ + let scalar_jsonpath = match &args[1] { + ValueRef::Scalar(ScalarRef::String(v)) => { + let res = parse_json_path(v) + .map_err(|_| format!("Invalid JSON Path '{}'", &String::from_utf8_lossy(v))); + Some(res) + } + _ => None, + }; + + let len_opt = args.iter().find_map(|arg| match arg { + ValueRef::Column(col) => Some(col.len()), + _ => None, + }); + let len = len_opt.unwrap_or(1); + + let mut output = MutableBitmap::with_capacity(len); + let mut validity = MutableBitmap::with_capacity(len); + + for idx in 0..len { + let jsonpath = match &args[1] { + ValueRef::Scalar(_) => scalar_jsonpath.clone(), + ValueRef::Column(col) => { + let scalar = unsafe { col.index_unchecked(idx) }; + match scalar { + ScalarRef::String(buf) => { + let res = parse_json_path(buf).map_err(|_| { + format!("Invalid JSON Path '{}'", &String::from_utf8_lossy(buf)) + }); + Some(res) + } + _ => None, + } + } + }; + match jsonpath { + Some(result) => match result { + Ok(path) => { + let json_row = match &args[0] { + ValueRef::Scalar(scalar) => scalar.clone(), + ValueRef::Column(col) => unsafe { col.index_unchecked(idx) }, + }; + match json_row { + ScalarRef::Variant(json) => match predicate(json, path) { + Ok(r) => { + output.push(r); + validity.push(true); + } + Err(err) => { + ctx.set_error(output.len(), err.to_string()); + output.push(false); + validity.push(false); + } + }, + _ => { + output.push(false); + validity.push(false); + } + } + } + Err(err) => { + ctx.set_error(output.len(), err); + output.push(false); + validity.push(false); + } + }, + None => { + output.push(false); + validity.push(false); + } + } + } + + let validity: Bitmap = validity.into(); + + match len_opt { + Some(_) => Value::Column(Column::Boolean(output.into())).wrap_nullable(Some(validity)), + None => { + if !validity.get_bit(0) { + Value::Scalar(Scalar::Null) + } else { + Value::Scalar(Scalar::Boolean(output.get(0))) + } + } + } +} diff --git a/src/query/functions/tests/it/scalars/testdata/function_list.txt b/src/query/functions/tests/it/scalars/testdata/function_list.txt index 0f74dd010480..b932d1b05533 100644 --- a/src/query/functions/tests/it/scalars/testdata/function_list.txt +++ b/src/query/functions/tests/it/scalars/testdata/function_list.txt @@ -1870,8 +1870,8 @@ Functions overloads: 0 json_object FACTORY 0 json_object_keep_null FACTORY 0 json_object_keys(Variant 
NULL) :: Variant NULL -0 json_path_exists(Variant, String) :: Boolean -1 json_path_exists(Variant NULL, String NULL) :: Boolean NULL +0 json_path_exists FACTORY +0 json_path_match FACTORY 0 json_path_query FACTORY 0 json_path_query_array(Variant, String) :: Variant NULL 1 json_path_query_array(Variant NULL, String NULL) :: Variant NULL diff --git a/src/query/functions/tests/it/scalars/testdata/variant.txt b/src/query/functions/tests/it/scalars/testdata/variant.txt index c3b193409499..fc9a77c02b89 100644 --- a/src/query/functions/tests/it/scalars/testdata/variant.txt +++ b/src/query/functions/tests/it/scalars/testdata/variant.txt @@ -2739,7 +2739,7 @@ evaluation (internal): ast : json_path_exists(NULL, '$.a') raw expr : json_path_exists(NULL, '$.a') -checked expr : json_path_exists(CAST(NULL AS Variant NULL), CAST("$.a" AS String NULL)) +checked expr : json_path_exists(NULL, "$.a") optimized expr : NULL output type : Boolean NULL output domain : {NULL} @@ -2748,7 +2748,7 @@ output : NULL ast : json_path_exists(parse_json('{"a": 1, "b": 2}'), NULL) raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), NULL) -checked expr : json_path_exists(CAST(parse_json("{\"a\": 1, \"b\": 2}") AS Variant NULL), CAST(NULL AS String NULL)) +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), NULL) optimized expr : NULL output type : Boolean NULL output domain : {NULL} @@ -2759,7 +2759,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a') raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a") optimized expr : true -output type : Boolean +output type : Boolean NULL output domain : {TRUE} output : true @@ -2768,7 +2768,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.c') raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.c') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.c") optimized expr : false -output type : Boolean +output type : Boolean NULL output domain : {FALSE} output : false @@ -2777,7 +2777,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ == 1 raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ == 1)') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a ? (@ == 1)") optimized expr : true -output type : Boolean +output type : Boolean NULL output domain : {TRUE} output : true @@ -2786,7 +2786,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ > 1) raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ > 1)') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a ? 
(@ > 1)") optimized expr : false -output type : Boolean +output type : Boolean NULL output domain : {FALSE} output : false @@ -2795,7 +2795,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[0]' raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[0]') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[0]") optimized expr : true -output type : Boolean +output type : Boolean NULL output domain : {TRUE} output : true @@ -2804,7 +2804,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[3]' raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[3]') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[3]") optimized expr : false -output type : Boolean +output type : Boolean NULL output domain : {FALSE} output : false @@ -2813,7 +2813,7 @@ ast : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[1 t raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[1 to last] ? (@ >=2 && @ <=3)') checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[1 to last] ? (@ >=2 && @ <=3)") optimized expr : true -output type : Boolean +output type : Boolean NULL output domain : {TRUE} output : true @@ -3999,3 +3999,294 @@ output domain : {TRUE} output : true +ast : json_path_match(parse_json('{"a":1,"b":2}'), '$.a == 1') +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.a == 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.a == 1") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : json_path_match(parse_json('{"a":1,"b":2}'), '$.a > 1') +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.a > 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.a > 1") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : json_path_match(parse_json('{"a":1,"b":2}'), '$.c > 0') +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.c > 0') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.c > 0") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : json_path_match(parse_json('{"a":1,"b":2}'), '$.b < 2') +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.b < 2') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.b < 2") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] == 1') +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] == 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[0] == 1") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1') +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[0] > 1") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[3] == 0') +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[3] == 0') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[3] == 0") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : 
json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2') +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[1 to last] >= 2") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] == 2 || $.b[1 to last] == 3') +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] == 2 || $.b[1 to last] == 3') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[1 to last] == 2 || $.b[1 to last] == 3") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : json_path_match(parse_json(s), p) +raw expr : json_path_match(parse_json(s::String NULL), p::String) +checked expr : json_path_match(parse_json(s), p) +evaluation: ++--------+------------------------+-----------------------------+--------------+ +| | s | p | Output | ++--------+------------------------+-----------------------------+--------------+ +| Type | String NULL | String | Boolean NULL | +| Domain | {""..="true"} ∪ {NULL} | {"$.a > 0"..="$[*].k == 1"} | Unknown | +| Row 0 | 'true' | '$.a > 0' | false | +| Row 1 | '[{"k":1},{"k":2}]' | '$[*].k == 1' | true | +| Row 2 | NULL | '$[*] > 1' | NULL | +| Row 3 | '[1,2,3,4]' | '$[*] > 2' | true | ++--------+------------------------+-----------------------------+--------------+ +evaluation (internal): ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Column | Data | ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| s | NullableColumn { column: StringColumn { data: 0x747275655b7b226b223a317d2c7b226b223a327d5d5b312c322c332c345d, offsets: [0, 4, 21, 21, 30] }, validity: [0b____1011] } | +| p | StringColumn { data: 0x242e61203e2030245b2a5d2e6b203d3d2031245b2a5d203e2031245b2a5d203e2032, offsets: [0, 7, 18, 26, 34] } | +| Output | NullableColumn { column: Boolean([0b____1010]), validity: [0b____1011] } | ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + +ast : parse_json('{"a":1,"b":2}') @@ '$.a == 1' +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.a == 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.a == 1") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a":1,"b":2}') @@ '$.a > 1' +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.a > 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.a > 1") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a":1,"b":2}') @@ '$.c > 0' +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.c > 0') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.c > 0") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a":1,"b":2}') @@ '$.b < 2' +raw expr : json_path_match(parse_json('{"a":1,"b":2}'), '$.b < 2') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":2}"), "$.b < 2") 
+optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] == 1' +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] == 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[0] == 1") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] > 1' +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[0] > 1") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[3] == 0' +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[3] == 0') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[3] == 0") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] >= 2' +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[1 to last] >= 2") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] == 2 || $.b[1 to last] == 3' +raw expr : json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] == 2 || $.b[1 to last] == 3') +checked expr : json_path_match(parse_json("{\"a\":1,\"b\":[1,2,3]}"), "$.b[1 to last] == 2 || $.b[1 to last] == 3") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json(s) @@ p +raw expr : json_path_match(parse_json(s::String NULL), p::String) +checked expr : json_path_match(parse_json(s), p) +evaluation: ++--------+------------------------+-----------------------------+--------------+ +| | s | p | Output | ++--------+------------------------+-----------------------------+--------------+ +| Type | String NULL | String | Boolean NULL | +| Domain | {""..="true"} ∪ {NULL} | {"$.a > 0"..="$[*].k == 1"} | Unknown | +| Row 0 | 'true' | '$.a > 0' | false | +| Row 1 | '[{"k":1},{"k":2}]' | '$[*].k == 1' | true | +| Row 2 | NULL | '$[*] > 1' | NULL | +| Row 3 | '[1,2,3,4]' | '$[*] > 2' | true | ++--------+------------------------+-----------------------------+--------------+ +evaluation (internal): ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Column | Data | ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| s | NullableColumn { column: StringColumn { data: 0x747275655b7b226b223a317d2c7b226b223a327d5d5b312c322c332c345d, offsets: [0, 4, 21, 21, 30] }, validity: [0b____1011] } | +| p | StringColumn { data: 0x242e61203e2030245b2a5d2e6b203d3d2031245b2a5d203e2031245b2a5d203e2032, offsets: [0, 7, 18, 26, 34] } | +| Output | NullableColumn { column: Boolean([0b____1010]), validity: [0b____1011] } | ++--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + +ast : NULL @? 
'$.a' +raw expr : json_path_exists(NULL, '$.a') +checked expr : json_path_exists(NULL, "$.a") +optimized expr : NULL +output type : Boolean NULL +output domain : {NULL} +output : NULL + + +ast : parse_json('{"a": 1, "b": 2}') @? NULL +raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), NULL) +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), NULL) +optimized expr : NULL +output type : Boolean NULL +output domain : {NULL} +output : NULL + + +ast : parse_json('{"a": 1, "b": 2}') @? '$.a' +raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a": 1, "b": 2}') @? '$.c' +raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.c') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.c") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ == 1)' +raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ == 1)') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a ? (@ == 1)") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ > 1)' +raw expr : json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.a ? (@ > 1)') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": 2}"), "$.a ? (@ > 1)") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[0]' +raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[0]') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[0]") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + +ast : parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[3]' +raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[3]') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[3]") +optimized expr : false +output type : Boolean NULL +output domain : {FALSE} +output : false + + +ast : parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[1 to last] ? (@ >=2 && @ <=3)' +raw expr : json_path_exists(parse_json('{"a": 1, "b": [1,2,3]}'), '$.b[1 to last] ? (@ >=2 && @ <=3)') +checked expr : json_path_exists(parse_json("{\"a\": 1, \"b\": [1,2,3]}"), "$.b[1 to last] ? (@ >=2 && @ <=3)") +optimized expr : true +output type : Boolean NULL +output domain : {TRUE} +output : true + + diff --git a/src/query/functions/tests/it/scalars/variant.rs b/src/query/functions/tests/it/scalars/variant.rs index 09de009211b0..bec2b39ea073 100644 --- a/src/query/functions/tests/it/scalars/variant.rs +++ b/src/query/functions/tests/it/scalars/variant.rs @@ -56,6 +56,9 @@ fn test_variant() { test_exists_all_keys_op(file); test_contains_in_left_op(file); test_contains_in_right_op(file); + test_json_path_match(file); + test_json_path_match_op(file); + test_json_path_exists_op(file); } fn test_parse_json(file: &mut impl Write) { @@ -1017,6 +1020,144 @@ fn test_json_path_exists(file: &mut impl Write) { ); } +fn test_json_path_exists_op(file: &mut impl Write) { + run_ast(file, "NULL @? '$.a'", &[]); + run_ast(file, r#"parse_json('{"a": 1, "b": 2}') @? NULL"#, &[]); + run_ast(file, r#"parse_json('{"a": 1, "b": 2}') @? 
'$.a'"#, &[]); + run_ast(file, r#"parse_json('{"a": 1, "b": 2}') @? '$.c'"#, &[]); + run_ast( + file, + r#"parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ == 1)'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ > 1)'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[0]'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[3]'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[1 to last] ? (@ >=2 && @ <=3)'"#, + &[], + ); +} + +fn test_json_path_match(file: &mut impl Write) { + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":2}'), '$.a == 1')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":2}'), '$.a > 1')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":2}'), '$.c > 0')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":2}'), '$.b < 2')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] == 1')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[3] == 0')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2')"#, + &[], + ); + run_ast( + file, + r#"json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] == 2 || $.b[1 to last] == 3')"#, + &[], + ); + run_ast(file, "json_path_match(parse_json(s), p)", &[ + ( + "s", + StringType::from_data_with_validity( + vec!["true", "[{\"k\":1},{\"k\":2}]", "", "[1,2,3,4]"], + vec![true, true, false, true], + ), + ), + ( + "p", + StringType::from_data(vec!["$.a > 0", "$[*].k == 1", "$[*] > 1", "$[*] > 2"]), + ), + ]); +} + +fn test_json_path_match_op(file: &mut impl Write) { + run_ast(file, r#"parse_json('{"a":1,"b":2}') @@ '$.a == 1'"#, &[]); + run_ast(file, r#"parse_json('{"a":1,"b":2}') @@ '$.a > 1'"#, &[]); + run_ast(file, r#"parse_json('{"a":1,"b":2}') @@ '$.c > 0'"#, &[]); + run_ast(file, r#"parse_json('{"a":1,"b":2}') @@ '$.b < 2'"#, &[]); + run_ast( + file, + r#"parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] == 1'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] > 1'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[3] == 0'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] >= 2'"#, + &[], + ); + run_ast( + file, + r#"parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] == 2 || $.b[1 to last] == 3'"#, + &[], + ); + run_ast(file, "parse_json(s) @@ p", &[ + ( + "s", + StringType::from_data_with_validity( + vec!["true", "[{\"k\":1},{\"k\":2}]", "", "[1,2,3,4]"], + vec![true, true, false, true], + ), + ), + ( + "p", + StringType::from_data(vec!["$.a > 0", "$[*].k == 1", "$[*] > 1", "$[*] > 2"]), + ), + ]); +} + fn test_get_by_keypath_op(file: &mut impl Write) { run_ast(file, r#"parse_json('[10, 20, 30]') #> '1'"#, &[]); run_ast(file, "NULL #> NULL", &[]); diff --git a/tests/sqllogictests/suites/query/02_function/02_0065_function_json.test b/tests/sqllogictests/suites/query/02_function/02_0065_function_json.test index 4cafeb23860c..ff9bb9bcfd4e 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0065_function_json.test +++ b/tests/sqllogictests/suites/query/02_function/02_0065_function_json.test @@ -193,6 +193,141 @@ SELECT json_path_exists(parse_json('{"a": 1, 
"b": [1,2,3]}'), '$.b[1 to last] ? ---- 1 +query T +SELECT NULL @? '$.a' +---- +NULL + +query T +SELECT parse_json('{"a": 1, "b": 2}') @? NULL +---- +NULL + +query T +SELECT parse_json('{"a": 1, "b": 2}') @? '$.a' +---- +1 + +query T +SELECT parse_json('{"a": 1, "b": 2}') @? '$.c' +---- +0 + +query T +SELECT parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ == 1)' +---- +1 + +query T +SELECT parse_json('{"a": 1, "b": 2}') @? '$.a ? (@ > 1)' +---- +0 + +query T +SELECT parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[0]' +---- +1 + +query T +SELECT parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[3]' +---- +0 + +query T +SELECT parse_json('{"a": 1, "b": [1,2,3]}') @? '$.b[1 to last] ? (@ >=2 && @ <=3)' +---- +1 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":2}'), '$.a == 1') +---- +1 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":2}'), '$.a > 1') +---- +0 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":2}'), '$.c > 0') +---- +0 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":2}'), '$.b < 2') +---- +0 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] == 1') +---- +1 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1') +---- +0 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[3] == 0') +---- +0 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2') +---- +1 + +query T +SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] == 2 || $.b[1 to last] == 3') +---- +1 + +query T +SELECT parse_json('{"a":1,"b":2}') @@ '$.a == 1' +---- +1 + +query T +SELECT parse_json('{"a":1,"b":2}') @@ '$.a > 1' +---- +0 + +query T +SELECT parse_json('{"a":1,"b":2}') @@ '$.c > 0' +---- +0 + +query T +SELECT parse_json('{"a":1,"b":2}') @@ '$.b < 2' +---- +0 + +query T +SELECT parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] == 1' +---- +1 + +query T +SELECT parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[0] > 1' +---- +0 + +query T +SELECT parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[3] == 0' +---- +0 + +query T +SELECT parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] >= 2' +---- +1 + +query T +SELECT parse_json('{"a":1,"b":[1,2,3]}') @@ '$.b[1 to last] == 2 || $.b[1 to last] == 3' +---- +1 + query T SELECT json_each(NULL) ---- @@ -398,6 +533,23 @@ select id, v ?& ['a','c'] from t2 order by id 3 0 4 NULL + +query T +select id, v @? 
'$.b' from t2 order by id +---- +1 0 +2 1 +3 0 +4 NULL + +query T +select id, v @@ '$[*] == "b"' from t2 order by id +---- +1 0 +2 0 +3 1 +4 NULL + statement ok DROP TABLE IF EXISTS t2 From 88ea3d34dc3716b1dc6654c4ea3778a7aeb396b5 Mon Sep 17 00:00:00 2001 From: everpcpc Date: Mon, 4 Dec 2023 15:55:54 +0800 Subject: [PATCH 02/16] chore(ci): fix install ziglang for debian (#13914) --- .github/actions/build_macos/action.yml | 1 + scripts/setup/dev_setup.sh | 29 +++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.github/actions/build_macos/action.yml b/.github/actions/build_macos/action.yml index 815d849647e7..b1d762a72834 100644 --- a/.github/actions/build_macos/action.yml +++ b/.github/actions/build_macos/action.yml @@ -21,6 +21,7 @@ runs: - name: Rust setup shell: bash run: | + brew unlink python || true bash ./scripts/setup/dev_setup.sh -yb rustup target add ${{ inputs.target }} diff --git a/scripts/setup/dev_setup.sh b/scripts/setup/dev_setup.sh index 419ae00947da..f9264dc0727c 100755 --- a/scripts/setup/dev_setup.sh +++ b/scripts/setup/dev_setup.sh @@ -98,6 +98,33 @@ function install_build_essentials { esac } +function install_ziglang { + PACKAGE_MANAGER=$1 + + if zig version; then + echo "==> ziglang is already installed" + return + fi + echo "==> installing ziglang..." + + case "$PACKAGE_MANAGER" in + apt-get) + curl -sSfLo /tmp/zig.tar.xz https://ziglang.org/download/0.11.0/zig-linux-x86_64-0.11.0.tar.xz + tar -xf /tmp/zig.tar.xz -C /tmp + "${PRE_COMMAND[@]}" cp /tmp/zig-linux-x86_64-0.11.0/zig /usr/local/bin/ + "${PRE_COMMAND[@]}" chmod +x /usr/local/bin/zig + rm -rf /tmp/zig* + ;; + yum | dnf | brew | pacman) + install_pkg zig "$PACKAGE_MANAGER" + ;; + *) + echo "Unable to install ziglang with package manager: $PACKAGE_MANAGER" + exit 1 + ;; + esac +} + function install_python3 { PACKAGE_MANAGER=$1 @@ -525,7 +552,7 @@ if [[ "$INSTALL_BUILD_TOOLS" == "true" ]]; then install_pkg cmake "$PACKAGE_MANAGER" install_pkg clang "$PACKAGE_MANAGER" install_pkg llvm "$PACKAGE_MANAGER" - install_pkg zig "$PACKAGE_MANAGER" + install_ziglang "$PACKAGE_MANAGER" install_python3 "$PACKAGE_MANAGER" # Any call to cargo will make rustup install the correct toolchain From 0a9c009390f4d41d07e542d352cee6cff52ae5cf Mon Sep 17 00:00:00 2001 From: everpcpc Date: Mon, 4 Dec 2023 16:12:15 +0800 Subject: [PATCH 03/16] chore(ci): install zig on centos (#13915) --- scripts/setup/dev_setup.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/setup/dev_setup.sh b/scripts/setup/dev_setup.sh index f9264dc0727c..69bdc623d24f 100755 --- a/scripts/setup/dev_setup.sh +++ b/scripts/setup/dev_setup.sh @@ -107,15 +107,16 @@ function install_ziglang { fi echo "==> installing ziglang..." 
+ arch=$(uname -m) case "$PACKAGE_MANAGER" in - apt-get) - curl -sSfLo /tmp/zig.tar.xz https://ziglang.org/download/0.11.0/zig-linux-x86_64-0.11.0.tar.xz + apt-get | yum | dnf | pacman) + curl -sSfLo /tmp/zig.tar.xz "https://ziglang.org/download/0.11.0/zig-linux-${arch}-0.11.0.tar.xz" tar -xf /tmp/zig.tar.xz -C /tmp - "${PRE_COMMAND[@]}" cp /tmp/zig-linux-x86_64-0.11.0/zig /usr/local/bin/ + "${PRE_COMMAND[@]}" cp "/tmp/zig-linux-${arch}-0.11.0/zig" /usr/local/bin/ "${PRE_COMMAND[@]}" chmod +x /usr/local/bin/zig rm -rf /tmp/zig* ;; - yum | dnf | brew | pacman) + brew) install_pkg zig "$PACKAGE_MANAGER" ;; *) From de934a327424e4d24be455708885e7e60fd0ef9e Mon Sep 17 00:00:00 2001 From: Winter Zhang Date: Mon, 4 Dec 2023 17:49:20 +0800 Subject: [PATCH 04/16] chore(cluster): add allow_adjust_parallelism flag for cluster (#13909) * chore(cluster): add expand pipeline flag for cluster * chore(cluster): add allow_adjust_parallelism flag for cluster --- src/query/service/src/api/rpc/exchange/data_exchange.rs | 8 +++++++- .../service/src/api/rpc/exchange/exchange_manager.rs | 1 + src/query/service/src/api/rpc/exchange/exchange_params.rs | 1 + src/query/service/src/api/rpc/exchange/exchange_source.rs | 5 ++++- .../src/interpreters/interpreter_copy_into_table.rs | 1 + src/query/service/src/interpreters/interpreter_delete.rs | 1 + .../service/src/interpreters/interpreter_merge_into.rs | 1 + src/query/service/src/interpreters/interpreter_replace.rs | 2 ++ .../src/interpreters/interpreter_table_optimize.rs | 1 + .../src/interpreters/interpreter_table_recluster.rs | 1 + src/query/service/src/schedulers/fragments/fragmenter.rs | 2 ++ src/query/sql/src/executor/physical_plan_visitor.rs | 2 ++ .../executor/physical_plans/physical_aggregate_final.rs | 1 + .../sql/src/executor/physical_plans/physical_exchange.rs | 2 ++ .../src/executor/physical_plans/physical_exchange_sink.rs | 1 + 15 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/query/service/src/api/rpc/exchange/data_exchange.rs b/src/query/service/src/api/rpc/exchange/data_exchange.rs index 7e472da5c4fc..7209148e4194 100644 --- a/src/query/service/src/api/rpc/exchange/data_exchange.rs +++ b/src/query/service/src/api/rpc/exchange/data_exchange.rs @@ -58,13 +58,19 @@ impl ShuffleDataExchange { pub struct MergeExchange { pub destination_id: String, pub ignore_exchange: bool, + pub allow_adjust_parallelism: bool, } impl MergeExchange { - pub fn create(destination_id: String, ignore_exchange: bool) -> DataExchange { + pub fn create( + destination_id: String, + ignore_exchange: bool, + allow_adjust_parallelism: bool, + ) -> DataExchange { DataExchange::Merge(MergeExchange { destination_id, ignore_exchange, + allow_adjust_parallelism, }) } } diff --git a/src/query/service/src/api/rpc/exchange/exchange_manager.rs b/src/query/service/src/api/rpc/exchange/exchange_manager.rs index 5257f6a85a43..2ded527b6659 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_manager.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_manager.rs @@ -815,6 +815,7 @@ impl FragmentCoordinator { fragment_id: self.fragment_id, query_id: info.query_id.to_string(), destination_id: exchange.destination_id.clone(), + allow_adjust_parallelism: exchange.allow_adjust_parallelism, ignore_exchange: exchange.ignore_exchange, })) } diff --git a/src/query/service/src/api/rpc/exchange/exchange_params.rs b/src/query/service/src/api/rpc/exchange/exchange_params.rs index 2d9c74c01c63..23a0bfb0c717 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_params.rs +++ 
b/src/query/service/src/api/rpc/exchange/exchange_params.rs @@ -49,6 +49,7 @@ pub struct MergeExchangeParams { pub destination_id: String, pub schema: DataSchemaRef, pub ignore_exchange: bool, + pub allow_adjust_parallelism: bool, pub exchange_injector: Arc, } diff --git a/src/query/service/src/api/rpc/exchange/exchange_source.rs b/src/query/service/src/api/rpc/exchange/exchange_source.rs index 841f28e352fd..aab9466c0eaf 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_source.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_source.rs @@ -88,6 +88,9 @@ pub fn via_exchange_source( pipeline.add_pipe(Pipe::create(last_output_len, items.len(), items)); - pipeline.try_resize(last_output_len)?; + if params.allow_adjust_parallelism { + pipeline.try_resize(last_output_len)?; + } + injector.apply_merge_deserializer(params, pipeline) } diff --git a/src/query/service/src/interpreters/interpreter_copy_into_table.rs b/src/query/service/src/interpreters/interpreter_copy_into_table.rs index 7826b8a5f608..5122261bc081 100644 --- a/src/query/service/src/interpreters/interpreter_copy_into_table.rs +++ b/src/query/service/src/interpreters/interpreter_copy_into_table.rs @@ -185,6 +185,7 @@ impl CopyIntoTableInterpreter { input: Box::new(root), kind: FragmentKind::Merge, keys: Vec::new(), + allow_adjust_parallelism: true, ignore_exchange: false, }); } diff --git a/src/query/service/src/interpreters/interpreter_delete.rs b/src/query/service/src/interpreters/interpreter_delete.rs index 7010ba53fa54..9364179a2670 100644 --- a/src/query/service/src/interpreters/interpreter_delete.rs +++ b/src/query/service/src/interpreters/interpreter_delete.rs @@ -272,6 +272,7 @@ impl DeleteInterpreter { input: Box::new(root), kind: FragmentKind::Merge, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, }); } diff --git a/src/query/service/src/interpreters/interpreter_merge_into.rs b/src/query/service/src/interpreters/interpreter_merge_into.rs index a1bd44e5d7da..8dac26c351a4 100644 --- a/src/query/service/src/interpreters/interpreter_merge_into.rs +++ b/src/query/service/src/interpreters/interpreter_merge_into.rs @@ -401,6 +401,7 @@ impl MergeIntoInterpreter { input: Box::new(merge_append), kind: FragmentKind::Merge, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, })), table_info: table_info.clone(), diff --git a/src/query/service/src/interpreters/interpreter_replace.rs b/src/query/service/src/interpreters/interpreter_replace.rs index 4709c56ef1a8..7c5999ca1f89 100644 --- a/src/query/service/src/interpreters/interpreter_replace.rs +++ b/src/query/service/src/interpreters/interpreter_replace.rs @@ -278,6 +278,7 @@ impl ReplaceInterpreter { input: root, kind: FragmentKind::Expansive, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, })); } @@ -331,6 +332,7 @@ impl ReplaceInterpreter { input: root, kind: FragmentKind::Merge, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, })); } diff --git a/src/query/service/src/interpreters/interpreter_table_optimize.rs b/src/query/service/src/interpreters/interpreter_table_optimize.rs index e3243eaf304d..c0563acb269d 100644 --- a/src/query/service/src/interpreters/interpreter_table_optimize.rs +++ b/src/query/service/src/interpreters/interpreter_table_optimize.rs @@ -124,6 +124,7 @@ impl OptimizeTableInterpreter { input: Box::new(root), kind: FragmentKind::Merge, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, }); } diff --git 
a/src/query/service/src/interpreters/interpreter_table_recluster.rs b/src/query/service/src/interpreters/interpreter_table_recluster.rs index 185287c39e76..34865f95f108 100644 --- a/src/query/service/src/interpreters/interpreter_table_recluster.rs +++ b/src/query/service/src/interpreters/interpreter_table_recluster.rs @@ -251,6 +251,7 @@ pub fn build_recluster_physical_plan( input: Box::new(root), kind: FragmentKind::Merge, keys: vec![], + allow_adjust_parallelism: true, ignore_exchange: false, }); } diff --git a/src/query/service/src/schedulers/fragments/fragmenter.rs b/src/query/service/src/schedulers/fragments/fragmenter.rs index d1f28f1946d5..2e550faf6308 100644 --- a/src/query/service/src/schedulers/fragments/fragmenter.rs +++ b/src/query/service/src/schedulers/fragments/fragmenter.rs @@ -105,6 +105,7 @@ impl Fragmenter { FragmentKind::Merge => Ok(Some(MergeExchange::create( Self::get_local_executor(ctx), plan.ignore_exchange, + plan.allow_adjust_parallelism, ))), FragmentKind::Expansive => Ok(Some(BroadcastExchange::create( from_multiple_nodes, @@ -264,6 +265,7 @@ impl PhysicalPlanReplacer for Fragmenter { // set the fragment id to a invalid value here. destination_fragment_id: usize::MAX, ignore_exchange: plan.ignore_exchange, + allow_adjust_parallelism: plan.allow_adjust_parallelism, }); let fragment_type = match self.state { State::SelectLeaf => FragmentType::Source, diff --git a/src/query/sql/src/executor/physical_plan_visitor.rs b/src/query/sql/src/executor/physical_plan_visitor.rs index 12ef0e976be9..e67a6b886adb 100644 --- a/src/query/sql/src/executor/physical_plan_visitor.rs +++ b/src/query/sql/src/executor/physical_plan_visitor.rs @@ -314,6 +314,7 @@ pub trait PhysicalPlanReplacer { kind: plan.kind.clone(), keys: plan.keys.clone(), ignore_exchange: plan.ignore_exchange, + allow_adjust_parallelism: plan.allow_adjust_parallelism, })) } @@ -336,6 +337,7 @@ pub trait PhysicalPlanReplacer { destination_fragment_id: plan.destination_fragment_id, query_id: plan.query_id.clone(), ignore_exchange: plan.ignore_exchange, + allow_adjust_parallelism: plan.allow_adjust_parallelism, })) } diff --git a/src/query/sql/src/executor/physical_plans/physical_aggregate_final.rs b/src/query/sql/src/executor/physical_plans/physical_aggregate_final.rs index 3e0d031e76bc..a29f4785ac85 100644 --- a/src/query/sql/src/executor/physical_plans/physical_aggregate_final.rs +++ b/src/query/sql/src/executor/physical_plans/physical_aggregate_final.rs @@ -214,6 +214,7 @@ impl PhysicalPlanBuilder { PhysicalPlan::Exchange(Exchange { plan_id: self.next_plan_id(), kind, + allow_adjust_parallelism: true, ignore_exchange: false, input: Box::new(PhysicalPlan::AggregatePartial(aggregate_partial)), keys: vec![RemoteExpr::ColumnRef { diff --git a/src/query/sql/src/executor/physical_plans/physical_exchange.rs b/src/query/sql/src/executor/physical_plans/physical_exchange.rs index 966cdab9ef15..eaaf0f652db7 100644 --- a/src/query/sql/src/executor/physical_plans/physical_exchange.rs +++ b/src/query/sql/src/executor/physical_plans/physical_exchange.rs @@ -34,6 +34,7 @@ pub struct Exchange { pub kind: FragmentKind, pub keys: Vec, pub ignore_exchange: bool, + pub allow_adjust_parallelism: bool, } impl Exchange { @@ -82,6 +83,7 @@ impl PhysicalPlanBuilder { input, kind, keys, + allow_adjust_parallelism: true, ignore_exchange: false, })) } diff --git a/src/query/sql/src/executor/physical_plans/physical_exchange_sink.rs b/src/query/sql/src/executor/physical_plans/physical_exchange_sink.rs index 264f3286bd51..0c4c559064ec 100644 --- 
a/src/query/sql/src/executor/physical_plans/physical_exchange_sink.rs +++ b/src/query/sql/src/executor/physical_plans/physical_exchange_sink.rs @@ -35,6 +35,7 @@ pub struct ExchangeSink { // Addresses of destination nodes pub query_id: String, pub ignore_exchange: bool, + pub allow_adjust_parallelism: bool, } impl ExchangeSink { From 9e21c23b81fb60aac778e8dbdf487f82a6b178f9 Mon Sep 17 00:00:00 2001 From: JackTan25 <60096118+JackTan25@users.noreply.github.com> Date: Mon, 4 Dec 2023 19:43:19 +0800 Subject: [PATCH 05/16] feat: add merge status (#13903) * add merge status * add output, need to modify tests * add tests * fix * fix * fix * fix * fix test * add merge into output --- src/common/storage/src/lib.rs | 2 + src/common/storage/src/merge.rs | 43 +++++++ src/query/catalog/src/table_context.rs | 5 + .../exchange/serde/exchange_deserializer.rs | 1 + .../api/rpc/exchange/statistics_receiver.rs | 5 + .../src/api/rpc/exchange/statistics_sender.rs | 24 ++++ .../src/api/rpc/packets/packet_data.rs | 13 +++ .../interpreters/interpreter_merge_into.rs | 33 ++++++ .../pipelines/builders/builder_merge_into.rs | 2 + .../serde/transform_deserializer.rs | 1 + src/query/service/src/sessions/query_ctx.rs | 9 ++ .../service/src/sessions/query_ctx_shared.rs | 3 + .../tests/it/sql/exec/get_table_bind_test.rs | 8 ++ .../it/storages/fuse/operations/commit.rs | 9 ++ src/query/sql/src/planner/mod.rs | 3 + src/query/sql/src/planner/plans/merge_into.rs | 64 ++++++++++ src/query/sql/src/planner/plans/mod.rs | 3 + src/query/sql/src/planner/plans/plan.rs | 3 +- .../merge_into/mutator/matched_mutator.rs | 22 +++- .../processor_merge_into_matched_and_split.rs | 7 +- .../processor_merge_into_not_matched.rs | 12 ++ .../09_fuse_engine/09_0026_merge_into.test | 109 +++++++++++++----- .../09_0028_distributed_merge_into.test | 6 +- .../base/09_fuse_engine/09_0032_pr13848.test | 12 +- .../18_rbac/18_0001_udf_priv.result | 2 + 25 files changed, 367 insertions(+), 34 deletions(-) create mode 100644 src/common/storage/src/merge.rs diff --git a/src/common/storage/src/lib.rs b/src/common/storage/src/lib.rs index 2d64e6055775..006f10cef5e3 100644 --- a/src/common/storage/src/lib.rs +++ b/src/common/storage/src/lib.rs @@ -65,10 +65,12 @@ pub use stage::StageFilesInfo; pub use stage::STDIN_FD; mod copy; +mod merge; mod statistics; pub use copy::CopyStatus; pub use copy::FileParseError; pub use copy::FileStatus; +pub use merge::MergeStatus; pub use statistics::Datum; pub use statistics::F64; diff --git a/src/common/storage/src/merge.rs b/src/common/storage/src/merge.rs new file mode 100644 index 000000000000..9400316e0742 --- /dev/null +++ b/src/common/storage/src/merge.rs @@ -0,0 +1,43 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use serde::Deserialize; +use serde::Serialize; + +#[derive(Default, Clone, Serialize, Deserialize)] +pub struct MergeStatus { + pub insert_rows: usize, + pub deleted_rows: usize, + pub update_rows: usize, +} + +impl MergeStatus { + pub fn add_insert_rows(&mut self, insert_rows: usize) { + self.insert_rows += insert_rows; + } + + pub fn add_deleted_rows(&mut self, deleted_rows: usize) { + self.deleted_rows += deleted_rows + } + + pub fn add_update_rows(&mut self, update_rows: usize) { + self.update_rows += update_rows + } + + pub fn merge_status(&mut self, merge_status: MergeStatus) { + self.insert_rows += merge_status.insert_rows; + self.deleted_rows += merge_status.deleted_rows; + self.update_rows += merge_status.update_rows; + } +} diff --git a/src/query/catalog/src/table_context.rs b/src/query/catalog/src/table_context.rs index d496ae4a5980..8b6988e11cdb 100644 --- a/src/query/catalog/src/table_context.rs +++ b/src/query/catalog/src/table_context.rs @@ -39,6 +39,7 @@ use common_settings::Settings; use common_storage::CopyStatus; use common_storage::DataOperator; use common_storage::FileStatus; +use common_storage::MergeStatus; use common_storage::StageFileInfo; use common_storage::StorageMetrics; use common_users::GrantObjectVisibilityChecker; @@ -220,6 +221,10 @@ pub trait TableContext: Send + Sync { fn get_copy_status(&self) -> Arc; + fn add_merge_status(&self, merge_status: MergeStatus); + + fn get_merge_status(&self) -> Arc>; + /// Get license key from context, return empty if license is not found or error happened. fn get_license_key(&self) -> String; } diff --git a/src/query/service/src/api/rpc/exchange/serde/exchange_deserializer.rs b/src/query/service/src/api/rpc/exchange/serde/exchange_deserializer.rs index b83bf3c4c39a..ec39fe08f552 100644 --- a/src/query/service/src/api/rpc/exchange/serde/exchange_deserializer.rs +++ b/src/query/service/src/api/rpc/exchange/serde/exchange_deserializer.rs @@ -129,6 +129,7 @@ impl BlockMetaTransform for TransformExchangeDeserializ DataPacket::FetchProgress => unreachable!(), DataPacket::SerializeProgress { .. } => unreachable!(), DataPacket::CopyStatus { .. } => unreachable!(), + DataPacket::MergeStatus { .. 
} => unreachable!(), DataPacket::FragmentData(v) => self.recv_data(meta.packet, v), } } diff --git a/src/query/service/src/api/rpc/exchange/statistics_receiver.rs b/src/query/service/src/api/rpc/exchange/statistics_receiver.rs index f0293abede7f..0fff98c7fbda 100644 --- a/src/query/service/src/api/rpc/exchange/statistics_receiver.rs +++ b/src/query/service/src/api/rpc/exchange/statistics_receiver.rs @@ -134,6 +134,11 @@ impl StatisticsReceiver { ctx.get_copy_status().merge(status); Ok(false) } + Ok(Some(DataPacket::MergeStatus(status))) => { + log::info!("merge MergeStatus"); + ctx.get_merge_status().write().merge_status(status); + Ok(false) + } } } diff --git a/src/query/service/src/api/rpc/exchange/statistics_sender.rs b/src/query/service/src/api/rpc/exchange/statistics_sender.rs index c33470ec2649..4ccada1f363e 100644 --- a/src/query/service/src/api/rpc/exchange/statistics_sender.rs +++ b/src/query/service/src/api/rpc/exchange/statistics_sender.rs @@ -22,6 +22,7 @@ use common_base::runtime::TrySpawn; use common_catalog::table_context::TableContext; use common_exception::ErrorCode; use common_exception::Result; +use common_storage::MergeStatus; use futures_util::future::Either; use log::warn; @@ -86,6 +87,10 @@ impl StatisticsSender { warn!("CopyStatus send has error, cause: {:?}.", error); } + if let Err(error) = Self::send_merge_status(&ctx, &tx).await { + warn!("MergeStatus send has error, cause: {:?}.", error); + } + if let Err(error) = Self::send_statistics(&ctx, &tx).await { warn!("Statistics send has error, cause: {:?}.", error); } @@ -137,6 +142,25 @@ impl StatisticsSender { Ok(()) } + #[async_backtrace::framed] + async fn send_merge_status( + ctx: &Arc, + flight_sender: &FlightSender, + ) -> Result<()> { + let merge_status = { + let binding = ctx.get_merge_status(); + let status = binding.read(); + MergeStatus { + insert_rows: status.insert_rows, + deleted_rows: status.deleted_rows, + update_rows: status.update_rows, + } + }; + let data_packet = DataPacket::MergeStatus(merge_status); + flight_sender.send(data_packet).await?; + Ok(()) + } + fn fetch_progress(ctx: &Arc) -> Result> { let mut progress_info = vec![]; diff --git a/src/query/service/src/api/rpc/packets/packet_data.rs b/src/query/service/src/api/rpc/packets/packet_data.rs index b504f0884dc6..fb90948e2f4e 100644 --- a/src/query/service/src/api/rpc/packets/packet_data.rs +++ b/src/query/service/src/api/rpc/packets/packet_data.rs @@ -23,6 +23,7 @@ use common_arrow::arrow_format::flight::data::FlightData; use common_exception::ErrorCode; use common_exception::Result; use common_storage::CopyStatus; +use common_storage::MergeStatus; use log::error; use crate::api::rpc::packets::ProgressInfo; @@ -55,6 +56,7 @@ pub enum DataPacket { FetchProgress, SerializeProgress(Vec), CopyStatus(CopyStatus), + MergeStatus(MergeStatus), } fn calc_size(flight_data: &FlightData) -> usize { @@ -67,6 +69,7 @@ impl DataPacket { DataPacket::ErrorCode(_) => 0, DataPacket::FetchProgress => 0, DataPacket::CopyStatus(_) => 0, + DataPacket::MergeStatus(_) => 0, DataPacket::SerializeProgress(_) => 0, DataPacket::Dictionary(v) => calc_size(v), DataPacket::FragmentData(v) => calc_size(&v.data) + v.meta.len(), @@ -116,6 +119,12 @@ impl TryFrom for FlightData { data_header: vec![], flight_descriptor: None, }, + DataPacket::MergeStatus(status) => FlightData { + app_metadata: vec![0x07], + data_body: serde_json::to_vec(&status)?, + data_header: vec![], + flight_descriptor: None, + }, }) } } @@ -163,6 +172,10 @@ impl TryFrom for DataPacket { let status = 
                 let status = serde_json::from_slice::<CopyStatus>(&flight_data.data_body)?;
                 Ok(DataPacket::CopyStatus(status))
             }
+            0x07 => {
+                let status = serde_json::from_slice::<MergeStatus>(&flight_data.data_body)?;
+                Ok(DataPacket::MergeStatus(status))
+            }
             _ => Err(ErrorCode::BadBytes("Unknown flight data packet type.")),
         }
     }
diff --git a/src/query/service/src/interpreters/interpreter_merge_into.rs b/src/query/service/src/interpreters/interpreter_merge_into.rs
index 8dac26c351a4..1257b9c41845 100644
--- a/src/query/service/src/interpreters/interpreter_merge_into.rs
+++ b/src/query/service/src/interpreters/interpreter_merge_into.rs
@@ -20,11 +20,15 @@ use std::u64::MAX;
 use common_catalog::table::TableExt;
 use common_exception::ErrorCode;
 use common_exception::Result;
+use common_expression::types::UInt32Type;
 use common_expression::ConstantFolder;
+use common_expression::DataBlock;
 use common_expression::DataSchema;
 use common_expression::DataSchemaRef;
 use common_expression::FieldIndex;
+use common_expression::FromData;
 use common_expression::RemoteExpr;
+use common_expression::SendableDataBlockStream;
 use common_expression::ROW_NUMBER_COL_NAME;
 use common_functions::BUILTIN_FUNCTIONS;
 use common_meta_app::schema::TableInfo;
@@ -38,6 +42,7 @@ use common_sql::executor::physical_plans::MergeIntoSource;
 use common_sql::executor::physical_plans::MutationKind;
 use common_sql::executor::PhysicalPlan;
 use common_sql::executor::PhysicalPlanBuilder;
+use common_sql::plans;
 use common_sql::plans::MergeInto as MergePlan;
 use common_sql::plans::RelOperator;
 use common_sql::plans::UpdatePlan;
@@ -59,6 +64,7 @@ use crate::interpreters::InterpreterPtr;
 use crate::pipelines::PipelineBuildResult;
 use crate::schedulers::build_query_pipeline_without_render_result_set;
 use crate::sessions::QueryContext;
+use crate::stream::DataBlockStream;
 
 // predicate_index should not be conflict with update expr's column_binding's index.
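On the wire the new status is just another tagged packet: the sender puts a one-byte tag into app_metadata (0x07 for MergeStatus, as in the arms above) and a serde_json body into data_body, and the receiver dispatches on that tag. A self-contained sketch of the same convention, standing a plain (u8, Vec<u8>) pair in for FlightData and assuming serde plus serde_json with the derive feature are available:

    use serde::{Deserialize, Serialize};

    #[derive(Default, Serialize, Deserialize, PartialEq, Debug)]
    struct MergeStatus {
        insert_rows: usize,
        deleted_rows: usize,
        update_rows: usize,
    }

    // Encode as (tag byte, JSON body); 0x07 marks a merge status packet.
    fn encode(status: &MergeStatus) -> serde_json::Result<(u8, Vec<u8>)> {
        Ok((0x07, serde_json::to_vec(status)?))
    }

    // Decode by dispatching on the tag; other packet kinds are handled elsewhere.
    fn decode(tag: u8, body: &[u8]) -> serde_json::Result<Option<MergeStatus>> {
        match tag {
            0x07 => Ok(Some(serde_json::from_slice::<MergeStatus>(body)?)),
            _ => Ok(None),
        }
    }

    fn main() -> serde_json::Result<()> {
        let sent = MergeStatus { insert_rows: 1, deleted_rows: 2, update_rows: 3 };
        let (tag, body) = encode(&sent)?;
        assert_eq!(decode(tag, &body)?, Some(sent));
        Ok(())
    }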
pub const PREDICATE_COLUMN_INDEX: IndexType = MAX as usize; @@ -119,6 +125,11 @@ impl Interpreter for MergeIntoInterpreter { Ok(build_res) } + + fn inject_result(&self) -> Result { + let blocks = self.get_merge_into_table_result()?; + Ok(Box::pin(DataBlockStream::create(None, blocks))) + } } impl MergeIntoInterpreter { @@ -443,4 +454,26 @@ impl MergeIntoInterpreter { ); Ok(filer.as_remote_expr()) } + + fn get_merge_into_table_result(&self) -> Result> { + let binding = self.ctx.get_merge_status(); + let status = binding.read(); + let schema = self.plan.schema(); + let mut columns = Vec::new(); + for field in schema.as_ref().fields() { + match field.name().as_str() { + plans::INSERT_NAME => { + columns.push(UInt32Type::from_data(vec![status.insert_rows as u32])) + } + plans::UPDTAE_NAME => { + columns.push(UInt32Type::from_data(vec![status.update_rows as u32])) + } + plans::DELETE_NAME => { + columns.push(UInt32Type::from_data(vec![status.deleted_rows as u32])) + } + _ => unreachable!(), + } + } + Ok(vec![DataBlock::new_from_columns(columns)]) + } } diff --git a/src/query/service/src/pipelines/builders/builder_merge_into.rs b/src/query/service/src/pipelines/builders/builder_merge_into.rs index a8b3b2a36aaf..8b8ee2c2ce0a 100644 --- a/src/query/service/src/pipelines/builders/builder_merge_into.rs +++ b/src/query/service/src/pipelines/builders/builder_merge_into.rs @@ -141,6 +141,7 @@ impl PipelineBuilder { unmatched.clone(), input_schema.clone(), self.func_ctx.clone(), + self.ctx.clone(), )?; let pipe_items = vec![ merge_into_not_matched_processor.into_pipe_item(), @@ -400,6 +401,7 @@ impl PipelineBuilder { unmatched.clone(), input.output_schema()?, self.func_ctx.clone(), + self.ctx.clone(), )?; pipe_items.push(merge_into_not_matched_processor.into_pipe_item()); } else { diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_deserializer.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_deserializer.rs index 568e3167d298..85d42d0994d1 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_deserializer.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_deserializer.rs @@ -223,6 +223,7 @@ where DataPacket::FetchProgress => unreachable!(), DataPacket::SerializeProgress { .. } => unreachable!(), DataPacket::CopyStatus { .. } => unreachable!(), + DataPacket::MergeStatus { .. 
} => unreachable!(), DataPacket::FragmentData(v) => self.recv_data(meta.packet, v), } } diff --git a/src/query/service/src/sessions/query_ctx.rs b/src/query/service/src/sessions/query_ctx.rs index 498853779a49..a9f883f96a92 100644 --- a/src/query/service/src/sessions/query_ctx.rs +++ b/src/query/service/src/sessions/query_ctx.rs @@ -70,6 +70,7 @@ use common_sql::IndexType; use common_storage::CopyStatus; use common_storage::DataOperator; use common_storage::FileStatus; +use common_storage::MergeStatus; use common_storage::StageFileInfo; use common_storage::StorageMetrics; use common_storages_fuse::TableContext; @@ -804,6 +805,14 @@ impl TableContext for QueryContext { self.shared.copy_status.clone() } + fn add_merge_status(&self, merge_status: MergeStatus) { + self.shared.merge_status.write().merge_status(merge_status) + } + + fn get_merge_status(&self) -> Arc> { + self.shared.merge_status.clone() + } + fn get_license_key(&self) -> String { unsafe { self.get_settings() diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index bfda073c04b5..6fa346e439d8 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -35,6 +35,7 @@ use common_pipeline_core::InputError; use common_settings::Settings; use common_storage::CopyStatus; use common_storage::DataOperator; +use common_storage::MergeStatus; use common_storage::StorageMetrics; use dashmap::DashMap; use parking_lot::Mutex; @@ -89,6 +90,7 @@ pub struct QueryContextShared { Arc>>>>>, pub(in crate::sessions) on_error_mode: Arc>>, pub(in crate::sessions) copy_status: Arc, + pub(in crate::sessions) merge_status: Arc>, /// partitions_sha for each table in the query. Not empty only when enabling query result cache. 
pub(in crate::sessions) partitions_shas: Arc>>, pub(in crate::sessions) cacheable: Arc, @@ -131,6 +133,7 @@ impl QueryContextShared { on_error_map: Arc::new(RwLock::new(None)), on_error_mode: Arc::new(RwLock::new(None)), copy_status: Arc::new(Default::default()), + merge_status: Arc::new(Default::default()), partitions_shas: Arc::new(RwLock::new(vec![])), cacheable: Arc::new(AtomicBool::new(true)), can_scan_from_agg_index: Arc::new(AtomicBool::new(true)), diff --git a/src/query/service/tests/it/sql/exec/get_table_bind_test.rs b/src/query/service/tests/it/sql/exec/get_table_bind_test.rs index 44e1391558ed..cefe5c89595d 100644 --- a/src/query/service/tests/it/sql/exec/get_table_bind_test.rs +++ b/src/query/service/tests/it/sql/exec/get_table_bind_test.rs @@ -108,6 +108,7 @@ use common_sql::Planner; use common_storage::CopyStatus; use common_storage::DataOperator; use common_storage::FileStatus; +use common_storage::MergeStatus; use common_storage::StageFileInfo; use common_users::GrantObjectVisibilityChecker; use dashmap::DashMap; @@ -698,6 +699,13 @@ impl TableContext for CtxDelegation { fn get_queries_profile(&self) -> HashMap>> { todo!() } + fn add_merge_status(&self, _merge_status: MergeStatus) { + todo!() + } + + fn get_merge_status(&self) -> Arc> { + todo!() + } } #[tokio::test(flavor = "multi_thread")] diff --git a/src/query/service/tests/it/storages/fuse/operations/commit.rs b/src/query/service/tests/it/storages/fuse/operations/commit.rs index c00657e403cf..591968602454 100644 --- a/src/query/service/tests/it/storages/fuse/operations/commit.rs +++ b/src/query/service/tests/it/storages/fuse/operations/commit.rs @@ -106,6 +106,7 @@ use common_settings::Settings; use common_storage::CopyStatus; use common_storage::DataOperator; use common_storage::FileStatus; +use common_storage::MergeStatus; use common_storage::StageFileInfo; use common_storages_fuse::FuseTable; use common_storages_fuse::FUSE_TBL_SNAPSHOT_PREFIX; @@ -659,6 +660,14 @@ impl TableContext for CtxDelegation { fn get_queries_profile(&self) -> HashMap>> { todo!() } + + fn add_merge_status(&self, _merge_status: MergeStatus) { + todo!() + } + + fn get_merge_status(&self) -> Arc> { + todo!() + } } #[derive(Clone, Debug)] diff --git a/src/query/sql/src/planner/mod.rs b/src/query/sql/src/planner/mod.rs index 875c861740a1..13f5162b67d0 100644 --- a/src/query/sql/src/planner/mod.rs +++ b/src/query/sql/src/planner/mod.rs @@ -43,5 +43,8 @@ pub use planner::PlanExtras; pub use planner::Planner; pub use plans::insert::InsertInputSource; pub use plans::ScalarExpr; +pub use plans::DELETE_NAME; +pub use plans::INSERT_NAME; +pub use plans::UPDTAE_NAME; pub use semantic::*; pub use stream_column::*; diff --git a/src/query/sql/src/planner/plans/merge_into.rs b/src/query/sql/src/planner/plans/merge_into.rs index 4b5e464d0016..a553dc9fd613 100644 --- a/src/query/sql/src/planner/plans/merge_into.rs +++ b/src/query/sql/src/planner/plans/merge_into.rs @@ -16,7 +16,13 @@ use std::collections::HashMap; use std::collections::HashSet; use common_ast::ast::TableAlias; +use common_exception::ErrorCode; +use common_exception::Result; +use common_expression::types::DataType; +use common_expression::types::NumberDataType; +use common_expression::DataField; use common_expression::DataSchemaRef; +use common_expression::DataSchemaRefExt; use common_expression::FieldIndex; use common_meta_types::MetaId; @@ -77,3 +83,61 @@ impl std::fmt::Debug for MergeInto { .finish() } } + +pub const INSERT_NAME: &str = "number of rows insertd"; +pub const UPDTAE_NAME: &str = 
"number of rows updated"; +pub const DELETE_NAME: &str = "number of rows deleted"; + +impl MergeInto { + // the order of output should be (insert, update, delete),this is + // consistent with snowflake. + fn merge_into_mutations(&self) -> (bool, bool, bool) { + let insert = matches!(self.merge_type, MergeIntoType::FullOperation) + || matches!(self.merge_type, MergeIntoType::InsertOnly); + let mut update = false; + let mut delete = false; + for evaluator in &self.matched_evaluators { + if evaluator.update.is_none() { + delete = true + } else { + update = true + } + } + (insert, update, delete) + } + + fn merge_into_table_schema(&self) -> Result { + let field_insertd = DataField::new(INSERT_NAME, DataType::Number(NumberDataType::Int32)); + let field_updated = DataField::new(UPDTAE_NAME, DataType::Number(NumberDataType::Int32)); + let field_deleted = DataField::new(DELETE_NAME, DataType::Number(NumberDataType::Int32)); + match self.merge_into_mutations() { + (true, true, true) => Ok(DataSchemaRefExt::create(vec![ + field_insertd.clone(), + field_updated.clone(), + field_deleted.clone(), + ])), + (true, true, false) => Ok(DataSchemaRefExt::create(vec![ + field_insertd.clone(), + field_updated.clone(), + ])), + (true, false, true) => Ok(DataSchemaRefExt::create(vec![ + field_insertd.clone(), + field_deleted.clone(), + ])), + (true, false, false) => Ok(DataSchemaRefExt::create(vec![field_insertd.clone()])), + (false, true, true) => Ok(DataSchemaRefExt::create(vec![ + field_updated.clone(), + field_deleted.clone(), + ])), + (false, true, false) => Ok(DataSchemaRefExt::create(vec![field_updated.clone()])), + (false, false, true) => Ok(DataSchemaRefExt::create(vec![field_deleted.clone()])), + _ => Err(ErrorCode::BadArguments( + "at least one matched or unmatched clause for merge into", + )), + } + } + + pub fn schema(&self) -> DataSchemaRef { + self.merge_into_table_schema().unwrap() + } +} diff --git a/src/query/sql/src/planner/plans/mod.rs b/src/query/sql/src/planner/plans/mod.rs index dd0b998840ed..b971477e0cd4 100644 --- a/src/query/sql/src/planner/plans/mod.rs +++ b/src/query/sql/src/planner/plans/mod.rs @@ -75,6 +75,9 @@ pub use materialized_cte::MaterializedCte; pub use merge_into::MatchedEvaluator; pub use merge_into::MergeInto; pub use merge_into::UnmatchedEvaluator; +pub use merge_into::DELETE_NAME; +pub use merge_into::INSERT_NAME; +pub use merge_into::UPDTAE_NAME; pub use operator::*; pub use pattern::PatternPlan; pub use plan::*; diff --git a/src/query/sql/src/planner/plans/plan.rs b/src/query/sql/src/planner/plans/plan.rs index f18f162464ee..ca0e192ac066 100644 --- a/src/query/sql/src/planner/plans/plan.rs +++ b/src/query/sql/src/planner/plans/plan.rs @@ -410,7 +410,7 @@ impl Plan { Plan::DescNetworkPolicy(plan) => plan.schema(), Plan::ShowNetworkPolicies(plan) => plan.schema(), Plan::CopyIntoTable(plan) => plan.schema(), - + Plan::MergeInto(plan) => plan.schema(), Plan::CreateTask(plan) => plan.schema(), Plan::DescribeTask(plan) => plan.schema(), Plan::ShowTasks(plan) => plan.schema(), @@ -457,6 +457,7 @@ impl Plan { | Plan::DescribeTask(_) | Plan::DescConnection(_) | Plan::ShowConnections(_) + | Plan::MergeInto(_) ) } } diff --git a/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs b/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs index 05188bc5f15c..9bf252f341bd 100644 --- a/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs +++ 
b/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs @@ -37,6 +37,7 @@ use common_expression::Column; use common_expression::DataBlock; use common_expression::TableSchemaRef; use common_metrics::storage::*; +use common_storage::MergeStatus; use itertools::Itertools; use log::info; use opendal::Operator; @@ -71,6 +72,7 @@ struct AggregationContext { } pub struct MatchedAggregator { + ctx: Arc, io_request_semaphore: Arc, segment_reader: CompactSegmentInfoReader, segment_locations: AHashMap, @@ -109,7 +111,7 @@ impl MatchedAggregator { Ok(Self { aggregation_ctx: Arc::new(AggregationContext { - ctx, + ctx: ctx.clone(), write_settings, read_settings, data_accessor, @@ -120,6 +122,7 @@ impl MatchedAggregator { segment_reader, block_mutation_row_offset: HashMap::new(), segment_locations: AHashMap::from_iter(segment_locations), + ctx: ctx.clone(), }) } @@ -153,6 +156,23 @@ impl MatchedAggregator { RowIdKind::Delete => { for row_id in row_ids { let (prefix, offset) = split_row_id(row_id); + let value = self.block_mutation_row_offset.get(&prefix); + if value.is_none() { + self.ctx.add_merge_status(MergeStatus { + insert_rows: 0, + update_rows: 0, + deleted_rows: 1, + }); + } else { + let s = value.unwrap(); + if !s.1.contains(&(offset as usize)) { + self.ctx.add_merge_status(MergeStatus { + insert_rows: 0, + update_rows: 0, + deleted_rows: 1, + }); + } + } // support idempotent delete self.block_mutation_row_offset .entry(prefix) diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_matched_and_split.rs b/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_matched_and_split.rs index dc6702be46bf..c8b99c6a2dd4 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_matched_and_split.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_matched_and_split.rs @@ -37,6 +37,7 @@ use common_pipeline_core::processors::ProcessorPtr; use common_pipeline_core::PipeItem; use common_sql::evaluator::BlockOperator; use common_sql::executor::physical_plans::MatchExpr; +use common_storage::MergeStatus; use crate::operations::common::MutationLogs; use crate::operations::merge_into::mutator::DeleteByExprMutator; @@ -259,7 +260,6 @@ impl Processor for MatchedSplitProcessor { } } - // Todo:(JackTan25) accutally, we should do insert-only optimization in the future. 
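In the matched_mutator change above, a delete only bumps deleted_rows in MergeStatus the first time its (block prefix, row offset) pair is seen; the idempotent-delete path can hand the same row id over more than once and must not inflate the summary. A reduced, self-contained sketch of that bookkeeping, using a plain HashMap<u64, HashSet<usize>> in place of block_mutation_row_offset:

    use std::collections::{HashMap, HashSet};

    fn main() {
        // (prefix, offset) pairs produced by split_row_id; note the duplicate (1, 0).
        let row_ids: Vec<(u64, usize)> = vec![(1, 0), (1, 1), (1, 0), (2, 5)];
        let mut seen: HashMap<u64, HashSet<usize>> = HashMap::new();
        let mut deleted_rows = 0usize;
        for (prefix, offset) in row_ids {
            // Only the first sighting of a (prefix, offset) pair contributes to the status.
            if seen.entry(prefix).or_default().insert(offset) {
                deleted_rows += 1;
            }
        }
        assert_eq!(deleted_rows, 3);
    }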
fn process(&mut self) -> Result<()> { if let Some(data_block) = self.input_data.take() { if data_block.is_empty() { @@ -308,6 +308,11 @@ impl Processor for MatchedSplitProcessor { current_block = current_block.filter_boolean_value(&filter)?; if !current_block.is_empty() { // add updated row_ids + self.ctx.add_merge_status(MergeStatus { + insert_rows: 0, + update_rows: current_block.num_rows(), + deleted_rows: 0, + }); self.output_data_row_id_data.push(DataBlock::new_with_meta( vec![current_block.get_by_offset(self.row_id_idx).clone()], current_block.num_rows(), diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_not_matched.rs b/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_not_matched.rs index d4220ff23cc8..3dc3d44c6c79 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_not_matched.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/processor_merge_into_not_matched.rs @@ -18,6 +18,7 @@ use std::collections::HashSet; use std::sync::Arc; use std::time::Instant; +use common_catalog::table_context::TableContext; use common_exception::Result; use common_expression::DataBlock; use common_expression::DataSchemaRef; @@ -32,6 +33,7 @@ use common_pipeline_core::processors::Processor; use common_pipeline_core::processors::ProcessorPtr; use common_pipeline_core::PipeItem; use common_sql::evaluator::BlockOperator; +use common_storage::MergeStatus; use itertools::Itertools; use crate::operations::merge_into::mutator::SplitByExprMutator; @@ -54,6 +56,7 @@ pub struct MergeIntoNotMatchedProcessor { func_ctx: FunctionContext, // data_schemas[i] means the i-th op's result block's schema. data_schemas: HashMap, + ctx: Arc, } impl MergeIntoNotMatchedProcessor { @@ -61,6 +64,7 @@ impl MergeIntoNotMatchedProcessor { unmatched: UnMatchedExprs, input_schema: DataSchemaRef, func_ctx: FunctionContext, + ctx: Arc, ) -> Result { let mut ops = Vec::::with_capacity(unmatched.len()); let mut data_schemas = HashMap::with_capacity(unmatched.len()); @@ -93,6 +97,7 @@ impl MergeIntoNotMatchedProcessor { output_data: Vec::new(), func_ctx, data_schemas, + ctx, }) } @@ -164,6 +169,13 @@ impl Processor for MergeIntoNotMatchedProcessor { metrics_inc_merge_into_append_blocks_rows_counter( satisfied_block.num_rows() as u32 ); + + self.ctx.add_merge_status(MergeStatus { + insert_rows: satisfied_block.num_rows(), + update_rows: 0, + deleted_rows: 0, + }); + self.output_data .push(op.op.execute(&self.func_ctx, satisfied_block)?) 
} diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test index e70c79a9d961..7a7ec128d1f4 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test @@ -52,8 +52,10 @@ merge into t1 using (select * from t2 ) on t1.a = t2.a when matched then update statement error 1006 merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c,t1.c = t2.c; -statement ok +query T merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c; +---- +4 query TTT select * from t1 order by a,b,c; @@ -74,8 +76,10 @@ select * from t2 order by a,b,c; 3 b_6 c_6 4 b_8 c_8 -statement ok +query TT merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 4 query TTT select * from t1 order by a,b,c; @@ -122,8 +126,10 @@ select * from t2 order by a,b,c; 4 b_8 c_8 5 b_9 c_9 -statement ok +query TT merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; +---- +4 query ITT select * from t1 order by a,b,c; @@ -144,8 +150,10 @@ select * from t1 order by a,b,c; statement error 1065 merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); -statement ok +query TTT merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t1.b = 'b_1' then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 1 1 query TTT select * from t1 order by a,b,c; @@ -154,8 +162,10 @@ select * from t1 order by a,b,c; 3 b_2 c_6 4 b_8 c_8 -statement ok +query TT merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when not matched and t2.c = 'c_9' then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 2 query TTT select * from t1 order by a,b,c; @@ -163,8 +173,10 @@ select * from t1 order by a,b,c; 1 b1 c_5 5 b_9 c_9 -statement ok +query T merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched and t2.c = 'c_8' then insert (a,b) values(t2.a,t2.b) when not matched and t2.c = 'c_7' then insert (a,c) values(t2.a,t2.c); +---- +2 query TTT select * from t1 order by a,b,c; @@ -190,8 +202,10 @@ statement error 4001 merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t2.c = 'c_9' then update set t1.b = 'b_11' when matched and t2.c = 'c_10' then delete; ## idempotent delete test -statement ok +query T merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; +---- +3 query TTT select * from t1 order by a,b,c; @@ -237,8 +251,11 @@ select * from t2 order by a,b,c; statement error 1065 merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched then insert values(t2.a,t2.c); -statement ok +query TT merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +1 1 + query TTT select * from t1 order by a,b,c; @@ -309,8 +326,10 @@ select $1,$2,$3 from @s5_merge_into order by $1,$2,$3; 3 a3 b3 ## test CSV -statement ok +query TT merge into target_table using (select $1,$2,$3 from @s5_merge_into) as cdc on cast(cdc.$1 
as int) = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); +---- +1 2 query TTT select * from target_table order by a,b,c; @@ -351,8 +370,10 @@ select * from target_table order by a,b,c; 1 a_1 b_1 2 a_2 b_2 -statement ok +query TT merge into target_table using (select $1,$2,$3 from @s4_merge_into) as cdc on cdc.$1 = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); +---- +1 2 query TTT select * from target_table order by a,b,c; @@ -395,8 +416,10 @@ select * from t2 order by a,b,c; 2 b_2 c_2 NULL b_3 c_3 -statement ok +query TT merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete when not matched then insert *; +---- +3 0 query TTT select * from t1 order by a,b,c; @@ -406,8 +429,10 @@ select * from t1 order by a,b,c; NULL b_1 c_1 NULL b_3 c_3 -statement ok +query T merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete; +---- +2 query TTT select * from t1 order by a,b,c; @@ -435,8 +460,10 @@ select count(*) from t2; statement ok insert into t2 values(1,'a1','b1'); -statement ok +query TT merge into t1 as t3 using (select * from t2 ) as t2 on t3.a = t2.a when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 query TTT select * from t1 order by a,b,c; @@ -455,8 +482,10 @@ INSERT INTO employees VALUES(1, 'Alice', 'HR'),(2, 'Bob', 'IT'),(3, 'Charlie', ' statement ok INSERT INTO salaries VALUES(1, 50000.00),(2, 60000.00); -statement ok +query TT MERGE INTO salaries USING (SELECT * FROM employees) as employees ON salaries.employee_id = employees.employee_id WHEN MATCHED AND employees.department = 'HR' THEN UPDATE SET salaries.salary = salaries.salary + 1000.00 WHEN MATCHED THEN UPDATE SET salaries.salary = salaries.salary + 500.00 WHEN NOT MATCHED THEN INSERT (employee_id, salary) VALUES (employees.employee_id, 55000.00); +---- +2 2 query TTT select * from salaries order by employee_id; @@ -479,8 +508,10 @@ insert into t1_target values(1); statement ok insert into t2_source values(1),(2); -statement ok +query TT merge into t1_target using (select * from t2_source) as t2_source on t1_target.a = t2_source.a when matched then update * when not matched then insert *; +---- +1 1 query T select * from t1_target order by a; @@ -505,8 +536,10 @@ merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1 statement error 1006 merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when matched then update set t2.a = t2.a; -statement ok +query TT merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when not matched then insert *; +---- +5 # By default setting, all rows merged from `cluster_source` will be resident in a single block of `cluster_target`, # as table `cluster_target` is clustered by `(a,b)`, the rows inside the one block are assumed to be sorted @@ -549,9 +582,11 @@ remove @source_parquet; statement ok copy into @source_parquet from (select * from source_test); -statement ok +query TTT merge into `target_test` as tt using (select `a`,`b`,`delete_flag` from @source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) when matched and ss.`delete_flag` = true then delete when matched then update * when not matched and ss.`delete_flag` = false then insert *; +---- +1 1 2 query TT select * from target_test order by a; @@ -601,8 +636,10 @@ create table tt1 (a int, b int); statement error 1065 merge into tt1 using(select 10, 
20) as tt2 on tt1.a = 1 when not matched and tt1.b = 2 then insert values (10, 20); -statement ok +query TT merge into tt1 using(select 10 as a, 20 as b) as tt2 on tt1.a = 1 when not matched and tt2.b = 2 then insert values (10, 20); +---- +0 query T select count(*) from tt1; @@ -616,8 +653,10 @@ create table tt2(a bool, b variant, c map(string, string)); statement ok insert into tt2 values (true, '10', {'k1':'v1'}), (false, '20', {'k2':'v2'}) -statement ok +query T merge into tt2 using(select true as x) as t on (x and tt2.a) when matched and tt2.a then update set tt2.b = parse_json('30'); +---- +1 query TTT select a, b, c from tt2 order by b; @@ -644,8 +683,10 @@ insert into t1 values(1); statement ok insert into t2 values(1),(2); -statement ok +query TT merge into t1 using t2 on t1.a = t2.a when matched then delete when not matched then insert *; +---- +1 1 query T select * from t1; @@ -706,16 +747,20 @@ select * from t2; 2 b2 0 3 b3 1 -statement ok +query TT merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 1 then delete when matched and t1.a = 2 then update * when matched and t1.a = 3 then delete; +---- +1 2 query TTT select * from t1; ---- 2 b2 0 -statement ok +query T merge into t1 using t2 on t1.a = t2.a when matched then delete; +---- +1 query T select count(*) from t1; @@ -725,8 +770,10 @@ select count(*) from t1; statement ok insert into t1 values(1,'a1',true),(2,'a2',false),(3,'a3',true); -statement ok +query TT merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 2 then update * when matched and t1.a = 1 then delete when matched and t1.a = 3 then update *; +---- +2 1 query TTT select * from t1; @@ -744,8 +791,10 @@ create table tt1(a bool, b int); statement ok insert into tt1 values (true, 1), (false, 2); -statement ok +query T merge into tt1 using (select 1 as x) as tt2 on (2 > 1) when matched and a then delete; +---- +1 query TT select * from tt1; @@ -771,8 +820,10 @@ insert into t11 values (1, 10),(2, 20),(3, 30),(4, 40); statement ok insert into t12 values (1, 10),(2, 20),(3, 30),(4, 40); -statement ok +query T MERGE INTO t11 USING(SELECT NULL AS c0 FROM t12) AS t12 ON (t11.a OR TRUE) WHEN MATCHED AND TRUE THEN DELETE; +---- +4 query T select count(*) from t11; @@ -933,7 +984,7 @@ insert into orders values(200007,7,'buy','BTC',4.81412194,48.14121943,'completed (200098,98,'buy','BTC',1.37252960,13.72529599,'completed',to_date('2021-01-01'),to_date('2021-01-01')), (200102,102,'buy','BTC',1.53596481,15.35964815,'completed',to_date('2021-01-01'),to_date('2021-01-01')); -statement ok +query T MERGE INTO orders USING ( SELECT o.order_id, o.user_id, o.order_type, o.asset_type, o.quantity + a.avg_quantity AS new_quantity, o.price, o.status, o.created_at, o.updated_at FROM orders o @@ -945,6 +996,8 @@ MERGE INTO orders USING ( ) AS joined_data ON orders.order_id = joined_data.order_id WHEN MATCHED THEN UPDATE SET orders.quantity = joined_data.new_quantity; +---- +10 query TTTT SELECT SUM(quantity) AS total_quantity, @@ -964,8 +1017,10 @@ create table tmp_01 like tb_01; statement ok insert into tmp_01 values(1,'abc',to_date('2023-11-29'),parse_json('{"a":1}')); -statement ok +query TT merge into tb_01 as T using ( select * from tmp_01) as S on t.id = s.id when matched then update * when not matched then insert *; +---- +1 0 query TTT select id,c1,to_date(c2),c3 from tb_01; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test index 
dd73d0c92c93..4045faeda154 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test @@ -61,9 +61,11 @@ remove @distributed_source_parquet; statement ok copy into @distributed_source_parquet from (select * from distributed_source_test); -statement ok +query TTT merge into `distributed_target_test` as tt using (select `a`,`b`,`is_databend_deleted` from @distributed_source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) when matched and ss.`is_databend_deleted` = true then delete when matched then update * when not matched and ss.`is_databend_deleted` = false then insert *; +---- +2 4 8 query TT select * from distributed_target_test order by a; @@ -123,6 +125,8 @@ select * from corner_source_table order by a,b; query TTT merge into corner_target_table as t1 using (select * from corner_source_table) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +0 5 query TTT select * from corner_target_table order by a,b; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test index 2a6fb2fa61d9..fbb2fdc54eb4 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test @@ -25,10 +25,12 @@ insert into merge_source_0 values(5,'a4'),(6,'b6'); ## test window,agg,join ## 1. join test -statement ok +query TT merge into merge_target_0 as t1 using (select t2.a,t3.b from merge_source_0 as t2 inner join merge_source_0 as t3 on t2.a = t3.a) as t4 on t4.a = t1.a when matched then update * when not matched then insert *; +---- +2 2 query TT select * from merge_target_0 order by a,b; @@ -53,9 +55,11 @@ select * from merge_source_0 order by a,b; 3 c7 ## 2. agg test -statement ok +query TT merge into merge_target_0 as t1 using (select avg(a) as a,b from merge_source_0 group by b) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +0 1 query TT select * from merge_target_0 order by a,b; @@ -68,9 +72,11 @@ select * from merge_target_0 order by a,b; 6 b6 ## 2. 
window func test -statement ok +query TT merge into merge_target_0 as t1 using (select row_number() OVER (PARTITION BY b ORDER BY a) as a,'d1' as b from merge_source_0) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +0 2 query TT select * from merge_target_0 order by a,b; diff --git a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result index 1b702fbe6659..ce01e9eb3525 100644 --- a/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result +++ b/tests/suites/0_stateless/18_rbac/18_0001_udf_priv.result @@ -46,6 +46,8 @@ Error: APIError: ResponseError with 1063: Permission denied, privilege [Usage] i 2 NULL 1 2 +0 +0 100 200 100 From d109e9758e72db22a62ea9e1435b25078b833a1f Mon Sep 17 00:00:00 2001 From: codedump Date: Mon, 4 Dec 2023 19:43:50 +0800 Subject: [PATCH 06/16] chore: fix array() function parse error (#13912) --- src/query/ast/src/parser/token.rs | 2 +- .../suites/query/02_function/02_0002_function_others.test | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/query/ast/src/parser/token.rs b/src/query/ast/src/parser/token.rs index aed2e06e2385..6ba51ac1078f 100644 --- a/src/query/ast/src/parser/token.rs +++ b/src/query/ast/src/parser/token.rs @@ -1288,7 +1288,7 @@ impl TokenKind { // | TokenKind::XMLSERIALIZE // | TokenKind::XMLTABLE | TokenKind::WHEN - | TokenKind::ARRAY + // | TokenKind::ARRAY | TokenKind::AS // | TokenKind::CHAR | TokenKind::CHARACTER diff --git a/tests/sqllogictests/suites/query/02_function/02_0002_function_others.test b/tests/sqllogictests/suites/query/02_function/02_0002_function_others.test index aab56c2038da..eb3c891e6023 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0002_function_others.test +++ b/tests/sqllogictests/suites/query/02_function/02_0002_function_others.test @@ -62,3 +62,8 @@ SELECT INET_NTOA(NULL) ---- NULL +query T +SELECT array(1); +---- +[1] + From 35fec84008131e8bf5a1505890fa82584eb112a1 Mon Sep 17 00:00:00 2001 From: junxiangMu <63799833+guojidan@users.noreply.github.com> Date: Mon, 4 Dec 2023 19:45:45 +0800 Subject: [PATCH 07/16] feat: support connection in stage related infer_schema (#13890) * support connection in stage related infer_schema * add test case * fix path err * fix path err * add monio * always start minio --- .../actions/test_sqllogic_stage/action.yml | 1 - .../infer_schema/infer_schema_table.rs | 36 +++++++++++++++++-- .../infer_schema/table_args.rs | 16 ++++----- src/query/sql/src/planner/binder/mod.rs | 1 + .../stage/formats/parquet/infer_schema.test | 12 +++++++ 5 files changed, 53 insertions(+), 13 deletions(-) diff --git a/.github/actions/test_sqllogic_stage/action.yml b/.github/actions/test_sqllogic_stage/action.yml index 3de5dc4544c8..30b34b1c5afd 100644 --- a/.github/actions/test_sqllogic_stage/action.yml +++ b/.github/actions/test_sqllogic_stage/action.yml @@ -28,7 +28,6 @@ runs: target: ${{ inputs.target }} artifacts: sqllogictests,meta,query - name: Minio Setup for (ubuntu-latest only) - if: inputs.storage == 's3' shell: bash run: | docker run -d --network host --name minio \ diff --git a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs index 01e013b70d59..f9d8c70ccdb6 100644 --- a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs +++ b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs @@ -13,8 +13,11 @@ // limitations under the 
License. use std::any::Any; +use std::collections::BTreeMap; use std::sync::Arc; +use common_ast::ast::FileLocation; +use common_ast::ast::UriLocation; use common_catalog::plan::DataSourcePlan; use common_catalog::plan::PartStatistics; use common_catalog::plan::Partitions; @@ -41,11 +44,12 @@ use common_pipeline_core::processors::ProcessorPtr; use common_pipeline_core::Pipeline; use common_pipeline_sources::AsyncSource; use common_pipeline_sources::AsyncSourcer; -use common_sql::binder::resolve_stage_location; +use common_sql::binder::resolve_file_location; use common_storage::init_stage_operator; use common_storage::read_parquet_schema_async; use common_storage::read_parquet_schema_async_rs; use common_storage::StageFilesInfo; +use opendal::Scheme; use crate::pipelines::processors::OutputPort; use crate::sessions::TableContext; @@ -179,8 +183,34 @@ impl AsyncSource for InferSchemaSource { } self.is_finished = true; - let (stage_info, path) = - resolve_stage_location(&self.ctx, &self.args_parsed.location).await?; + let file_location = if let Some(location) = + self.args_parsed.location.clone().strip_prefix('@') + { + FileLocation::Stage(location.to_string()) + } else if let Some(connection_name) = &self.args_parsed.connection_name { + let conn = self.ctx.get_connection(connection_name).await?; + let uri = UriLocation::from_uri( + self.args_parsed.location.clone(), + "".to_string(), + conn.storage_params, + )?; + let proto = conn.storage_type.parse::()?; + if proto != uri.protocol.parse::()? { + return Err(ErrorCode::BadArguments(format!( + "protocol from connection_name={connection_name} ({proto}) not match with uri protocol ({0}).", + uri.protocol + ))); + } + FileLocation::Uri(uri) + } else { + let uri = UriLocation::from_uri( + self.args_parsed.location.clone(), + "".to_string(), + BTreeMap::default(), + )?; + FileLocation::Uri(uri) + }; + let (stage_info, path) = resolve_file_location(&self.ctx, &file_location).await?; let enable_experimental_rbac_check = self .ctx .get_settings() diff --git a/src/query/service/src/table_functions/infer_schema/table_args.rs b/src/query/service/src/table_functions/infer_schema/table_args.rs index 33b9908a6166..ce01efc219c0 100644 --- a/src/query/service/src/table_functions/infer_schema/table_args.rs +++ b/src/query/service/src/table_functions/infer_schema/table_args.rs @@ -21,6 +21,7 @@ use common_storages_fuse::table_functions::string_value; #[derive(Clone)] pub(crate) struct InferSchemaArgsParsed { pub(crate) location: String, + pub(crate) connection_name: Option, pub(crate) file_format: Option, pub(crate) files_info: StageFilesInfo, } @@ -30,6 +31,7 @@ impl InferSchemaArgsParsed { let args = table_args.expect_all_named("infer_schema")?; let mut location = None; + let mut connection_name = None; let mut file_format = None; let mut files_info = StageFilesInfo { path: "".to_string(), @@ -40,15 +42,10 @@ impl InferSchemaArgsParsed { for (k, v) in &args { match k.to_lowercase().as_str() { "location" => { - let v = string_value(v)?; - if let Some(name) = v.strip_prefix('@') { - location = Some(name.to_string()); - } else { - return Err(ErrorCode::BadArguments(format!( - "location must start with @, but got {}", - v - ))); - } + location = Some(string_value(v)?); + } + "connection_name" => { + connection_name = Some(string_value(v)?); } "pattern" => { files_info.pattern = Some(string_value(v)?); @@ -71,6 +68,7 @@ impl InferSchemaArgsParsed { Ok(Self { location, + connection_name, file_format, files_info, }) diff --git 
a/src/query/sql/src/planner/binder/mod.rs b/src/query/sql/src/planner/binder/mod.rs index 2692087634ac..86ae9ff07961 100644 --- a/src/query/sql/src/planner/binder/mod.rs +++ b/src/query/sql/src/planner/binder/mod.rs @@ -59,6 +59,7 @@ pub use binder::Binder; pub use builders::*; pub use column_binding::ColumnBinding; pub use column_binding::ColumnBindingBuilder; +pub use copy_into_table::resolve_file_location; pub use copy_into_table::resolve_stage_location; pub use internal_column_factory::INTERNAL_COLUMN_FACTORY; pub use location::parse_uri_location; diff --git a/tests/sqllogictests/suites/stage/formats/parquet/infer_schema.test b/tests/sqllogictests/suites/stage/formats/parquet/infer_schema.test index 8e1623cd02dd..ab2c1a6dfe4c 100644 --- a/tests/sqllogictests/suites/stage/formats/parquet/infer_schema.test +++ b/tests/sqllogictests/suites/stage/formats/parquet/infer_schema.test @@ -50,3 +50,15 @@ select * from infer_schema(location => '@data/parquet/', FILE_FORMAT => 'PARQUET ---- id INT 0 0 t TUPLE(A INT32, B STRING) 0 1 + +statement ok +drop CONNECTION IF EXISTS my_conn + +statement ok +create CONNECTION my_conn STORAGE_TYPE = 's3' access_key_id='minioadmin' secret_access_key='minioadmin' endpoint_url='http://127.0.0.1:9900/' + +query +select * from INFER_SCHEMA(location => 's3://testbucket/data/parquet/tuple.parquet', connection_name => 'my_conn') +---- +id INT 0 0 +t TUPLE(A INT32, B STRING) 0 1 From 4f1bfcd6d9069db4d8935942afbb355b7233a5da Mon Sep 17 00:00:00 2001 From: JackTan25 <60096118+JackTan25@users.noreply.github.com> Date: Mon, 4 Dec 2023 21:29:10 +0800 Subject: [PATCH 08/16] fix: Fix source optimized without enable distributed but in clusters environment (#13910) * fix complex source ditributed bug * add more tests * refactor test * add test * add complex source test * add complex source test --------- Co-authored-by: dantengsky --- .../sql/src/planner/optimizer/optimizer.rs | 15 +- .../09_fuse_engine/09_0026_merge_into.test | 1029 +---------------- .../09_0026_merge_into_separate_pipeline.test | 91 +- .../09_0028_distributed_merge_into.test | 169 +-- .../base/09_fuse_engine/09_0032_pr13848.test | 113 +- ...distributed_merge_into_without_enable.test | 223 ++++ ...34_pr13848_without_distributed_enable.test | 112 ++ ...e_pipeline_without_distributed_enable.test | 96 ++ ...merge_into_without_distributed_enable.test | 1028 ++++++++++++++++ 9 files changed, 1475 insertions(+), 1401 deletions(-) create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0033_distributed_merge_into_without_enable.test create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0034_pr13848_without_distributed_enable.test create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0035_merge_into_separate_pipeline_without_distributed_enable.test create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test diff --git a/src/query/sql/src/planner/optimizer/optimizer.rs b/src/query/sql/src/planner/optimizer/optimizer.rs index aa872f1eac7e..59ef8b0db8f7 100644 --- a/src/query/sql/src/planner/optimizer/optimizer.rs +++ b/src/query/sql/src/planner/optimizer/optimizer.rs @@ -137,10 +137,17 @@ pub fn optimize( plan.meta_data.clone(), plan.input.child(1)?.clone(), )?; - // we need to remove exchange of right_source, because it's - // not an end query. 
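The hunk that follows is the core of the fix: instead of always removing the Exchange sitting on top of the merge-into source plan, it is removed only when the plan is being optimized for distributed execution and the enable_distributed_merge_into setting is on, so that a cluster without that setting still keeps the Exchange that gathers the source rows onto one node. A minimal sketch of that gating condition, with illustrative names rather than the optimizer's real types:

    // Illustrative only: two booleans standing in for opt_ctx.config and the
    // enable_distributed_merge_into setting used in the change below.
    fn should_strip_source_exchange(
        distributed_optimization_enabled: bool,
        distributed_merge_into_enabled: bool,
    ) -> bool {
        // Only a genuinely distributed merge-into may drop the Exchange that
        // merges the source data; in every other case it must be kept.
        distributed_optimization_enabled && distributed_merge_into_enabled
    }

    fn main() {
        // A cluster that has not enabled distributed merge-into keeps the Exchange.
        assert!(!should_strip_source_exchange(true, false));
        // Only the fully enabled combination strips it.
        assert!(should_strip_source_exchange(true, true));
    }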
- if let RelOperator::Exchange(_) = right_source.plan.as_ref() { - right_source = right_source.child(0)?.clone(); + + // if it's not distributed execution, we should reserve + // exchange to merge source data. + if opt_ctx.config.enable_distributed_optimization + && ctx.get_settings().get_enable_distributed_merge_into()? + { + // we need to remove exchange of right_source, because it's + // not an end query. + if let RelOperator::Exchange(_) = right_source.plan.as_ref() { + right_source = right_source.child(0)?.clone(); + } } // replace right source let mut join_sexpr = plan.input.clone(); diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test index 7a7ec128d1f4..b56efaf7794d 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into.test @@ -1,1034 +1,7 @@ -statement ok -set enable_experimental_merge_into = 1; - statement ok set enable_distributed_merge_into = 1; -statement ok -drop table if exists t1; - -statement ok -drop table if exists t2; - -statement ok -create table t1(a int,b string, c string); - -statement ok -create table t2(a int,b string, c string); - -statement ok -insert into t1 values(1,'b1','c1'),(2,'b2','c2'); - -statement ok -insert into t1 values(2,'b3','c3'),(3,'b4','c4'); - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c1 -2 b2 c2 -2 b3 c3 -3 b4 c4 - -statement ok -insert into t2 values(1,'b_5','c_5'),(3,'b_6','c_6'); - -statement ok -insert into t2 values(2,'b_7','c_7'); - -query TTT -select * from t2 order by a,b,c; ----- -1 b_5 c_5 -2 b_7 c_7 -3 b_6 c_6 - -## test source alias -statement error 1005 -merge into t1 using (select * from t2 ) on t1.a = t2.a when matched then update set t1.c = t2.c,t1.c = t2.c; - -# section I: basic test for match and unmatch - -statement error 1006 -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c,t1.c = t2.c; - -query T -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c; ----- -4 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -2 b2 c_7 -2 b3 c_7 -3 b4 c_6 - -statement ok -insert into t2 values(4,'b_8','c_8'); - -query TTT -select * from t2 order by a,b,c; ----- -1 b_5 c_5 -2 b_7 c_7 -3 b_6 c_6 -4 b_8 c_8 - -query TT -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); ----- -1 4 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -2 b2 c_7 -2 b3 c_7 -3 b4 c_6 -4 b_8 c_8 - -statement ok -insert into t2 values(1,'b_9','c_9'); - -statement error 4001 -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -2 b2 c_7 -2 b3 c_7 -3 b4 c_6 -4 b_8 c_8 - -statement ok -delete from t2 where a = 1; - -query TTT -select * from t2 order by a,b,c; ----- -2 b_7 c_7 -3 b_6 c_6 -4 b_8 c_8 - -statement ok -insert into t2 values(5,'b_9','c_9'); - -query TTT -select * from t2 order by a,b,c; ----- -2 b_7 c_7 -3 b_6 c_6 -4 b_8 c_8 -5 b_9 c_9 - -query TT -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; ----- -4 - -query ITT -select * from t1 order by a,b,c; ----- -1 b1 c_5 - -# section 2 multi clauses 
-statement ok -insert into t1 values(2,'b_1','c_1'),(3,'b_2','c_2'); - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -2 b_1 c_1 -3 b_2 c_2 - -statement error 1065 -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); - -query TTT -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t1.b = 'b_1' then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); ----- -1 1 1 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -3 b_2 c_6 -4 b_8 c_8 - -query TT -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when not matched and t2.c = 'c_9' then insert (a,b,c) values(t2.a,t2.b,t2.c); ----- -1 2 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -5 b_9 c_9 - -query T -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched and t2.c = 'c_8' then insert (a,b) values(t2.a,t2.b) when not matched and t2.c = 'c_7' then insert (a,c) values(t2.a,t2.c); ----- -2 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 -2 NULL c_7 -4 b_8 NULL -5 b_9 c_9 - -statement ok -insert into t2 values(5,'b_10','c_10'); - -query TTT -select * from t2 order by a,b,c; ----- -2 b_7 c_7 -3 b_6 c_6 -4 b_8 c_8 -5 b_10 c_10 -5 b_9 c_9 - -statement error 4001 -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t2.c = 'c_9' then update set t1.b = 'b_11' when matched and t2.c = 'c_10' then delete; - -## idempotent delete test -query T -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; ----- -3 - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c_5 - -## test star for merge into -statement ok -truncate table t1; - -statement ok -truncate table t2; - -query I -select count(*) from t1; ----- -0 - -query I -select count(*) from t2; ----- -0 - -statement ok -insert into t1 values(1,'b1','c1'),(2,'b2','c2'); - -query TTT -select * from t1 order by a,b,c; ----- -1 b1 c1 -2 b2 c2 - -statement ok -insert into t2 values(1,'b3','c3'),(3,'b4','c4'); - -query TTT -select * from t2 order by a,b,c; ----- -1 b3 c3 -3 b4 c4 - -## test insert columns mismatch -statement error 1065 -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched then insert values(t2.a,t2.c); - -query TT -merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; ----- -1 1 - - -query TTT -select * from t1 order by a,b,c; ----- -1 b3 c3 -2 b2 c2 -3 b4 c4 - -## test multi same name for star -statement error 1065 -merge into t1 using (select a,b,c,a from t2 ) as t2 on t1.a = t2.a when matched then update *; - -statement error 1065 -merge into t1 using (select a,b,c,a,b from t2 ) as t2 on t1.a = t2.a when not matched then insert *; - -## stage file test -statement ok -drop table if exists test_stage; - -statement ok -drop table if exists target_table; - -statement ok -create table target_table(a int,b string,c string); - -statement ok -insert into target_table values(1,'a_1','b_1'),(2,'a_2','b_2'); - -query TTT -select * from target_table order by a,b,c; ----- -1 a_1 b_1 -2 a_2 b_2 - -statement ok -create table test_stage(a int,b string,c string); - -statement ok -insert into test_stage values(1,'a1','b1'),(2,'a2','b2'),(3,'a3','b3'); - -query TTT -select * from test_stage 
order by a,b,c; ----- -1 a1 b1 -2 a2 b2 -3 a3 b3 - -statement ok -drop stage if exists s5_merge_into; - -statement ok -drop stage if exists s4_merge_into; - -statement ok -create stage s5_merge_into FILE_FORMAT = (TYPE = CSV); - -statement ok -remove @s5_merge_into; - -statement ok -copy into @s5_merge_into from (select a,b,c from test_stage order by a,b,c); - -query TTT -select $1,$2,$3 from @s5_merge_into order by $1,$2,$3; ----- -1 a1 b1 -2 a2 b2 -3 a3 b3 - -## test CSV -query TT -merge into target_table using (select $1,$2,$3 from @s5_merge_into) as cdc on cast(cdc.$1 as int) = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); ----- -1 2 - -query TTT -select * from target_table order by a,b,c; ----- -3 a3 b3 - -## test parquet -statement ok -truncate table target_table; - -query I -select count(*) from target_table; ----- -0 - -statement ok -create stage s4_merge_into FILE_FORMAT = (TYPE = PARQUET); - -statement ok -remove @s4_merge_into; - -statement ok -copy into @s4_merge_into from (select a,b,c from test_stage order by a,b,c); - -query TTT -select $1,$2,$3 from @s4_merge_into order by $1,$2,$3; ----- -1 a1 b1 -2 a2 b2 -3 a3 b3 - -statement ok -insert into target_table values(1,'a_1','b_1'),(2,'a_2','b_2'); - -query TTT -select * from target_table order by a,b,c; ----- -1 a_1 b_1 -2 a_2 b_2 - -query TT -merge into target_table using (select $1,$2,$3 from @s4_merge_into) as cdc on cdc.$1 = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); ----- -1 2 - -query TTT -select * from target_table order by a,b,c; ----- -3 a3 b3 - -## NULL test, for join, if join_expr result is -## NULL, it will be treated as not macthed. -statement ok -truncate table t1; - -statement ok -truncate table t2; - -query I -select count(*) from t1; ----- -0 - -query I -select count(*) from t2; ----- -0 - -statement ok -insert into t1 values(NULL,'b_1','c_1'); - -query TTT -select * from t1 order by a,b,c; ----- -NULL b_1 c_1 - -statement ok -insert into t2 values(1,'b_4','c_4'),(2,'b_2','c_2'),(NULL,'b_3','c_3'); - -query TTT -select * from t2 order by a,b,c; ----- -1 b_4 c_4 -2 b_2 c_2 -NULL b_3 c_3 - -query TT -merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete when not matched then insert *; ----- -3 0 - -query TTT -select * from t1 order by a,b,c; ----- -1 b_4 c_4 -2 b_2 c_2 -NULL b_1 c_1 -NULL b_3 c_3 - -query T -merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete; ----- -2 - -query TTT -select * from t1 order by a,b,c; ----- -NULL b_1 c_1 -NULL b_3 c_3 - -statement ok -truncate table t1; - -statement ok -truncate table t2; - -query I -select count(*) from t1; ----- -0 - -query I -select count(*) from t2; ----- -0 - -## test target table alias -statement ok -insert into t2 values(1,'a1','b1'); - -query TT -merge into t1 as t3 using (select * from t2 ) as t2 on t3.a = t2.a when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); ----- -1 - -query TTT -select * from t1 order by a,b,c; ----- -1 a1 b1 - -statement ok -CREATE TABLE employees (employee_id INT, employee_name VARCHAR(255),department VARCHAR(255)); - -statement ok -CREATE TABLE salaries (employee_id INT,salary DECIMAL(10, 2)); - -statement ok -INSERT INTO employees VALUES(1, 'Alice', 'HR'),(2, 'Bob', 'IT'),(3, 'Charlie', 'Finance'),(4, 'David', 'HR'); - -statement ok -INSERT INTO salaries VALUES(1, 50000.00),(2, 60000.00); - -query TT -MERGE INTO salaries USING (SELECT * FROM 
employees) as employees ON salaries.employee_id = employees.employee_id WHEN MATCHED AND employees.department = 'HR' THEN UPDATE SET salaries.salary = salaries.salary + 1000.00 WHEN MATCHED THEN UPDATE SET salaries.salary = salaries.salary + 500.00 WHEN NOT MATCHED THEN INSERT (employee_id, salary) VALUES (employees.employee_id, 55000.00); ----- -2 2 - -query TTT -select * from salaries order by employee_id; ----- -1 51000.00 -2 60500.00 -3 55000.00 -4 55000.00 - -## null cast bug fix -statement ok -create table t1_target(a int not null); - -statement ok -create table t2_source(a int not null); - -statement ok -insert into t1_target values(1); - -statement ok -insert into t2_source values(1),(2); - -query TT -merge into t1_target using (select * from t2_source) as t2_source on t1_target.a = t2_source.a when matched then update * when not matched then insert *; ----- -1 1 - -query T -select * from t1_target order by a; ----- -1 -2 - -## cluster table test -statement ok -create table cluster_target(a int,b string,c int) cluster by(a,b); - -statement ok -create table cluster_source(a int,b string,c int); - -statement ok -insert into cluster_source values(12,'b',1),(1,'a',2),(2,'b',3),(2,'a',4),(3,'a',3); - -## test update indetify error -statement error 1006 -merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when matched then update set cluster_target.a = t2.a; - -statement error 1006 -merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when matched then update set t2.a = t2.a; - -query TT -merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when not matched then insert *; ----- -5 - -# By default setting, all rows merged from `cluster_source` will be resident in a single block of `cluster_target`, -# as table `cluster_target` is clustered by `(a,b)`, the rows inside the one block are assumed to be sorted -# by `(a, b)`, consequently, the result of the following query should be ordered by `(a,b)` without an explicit -# `order by` clause. 
-query TTT -select * from cluster_target; ----- -1 a 2 -2 a 4 -2 b 3 -3 a 3 -12 b 1 - -## add more tests -statement ok -drop table if exists target_test; - -statement ok -drop table if exists source_test; - -statement ok -create table target_test(a int,b string); - -statement ok -insert into target_test values(1,'a'),(2,'b'),(3,'c'); - -statement ok -create table source_test(a int,b string,delete_flag bool); - -statement ok -insert into source_test values(1,'d',true),(2,'e',true),(3,'f',false),(4,'e',true),(5,'f',false); - -statement ok -create stage source_parquet file_format = (type = parquet); - -statement ok -remove @source_parquet; - -statement ok -copy into @source_parquet from (select * from source_test); - -query TTT -merge into `target_test` as tt using (select `a`,`b`,`delete_flag` from @source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) -when matched and ss.`delete_flag` = true then delete when matched then update * when not matched and ss.`delete_flag` = false then insert *; ----- -1 1 2 - -query TT -select * from target_test order by a; ----- -3 f -5 f - -## test not match cast and predicate index -statement ok -drop table if exists test_order; - -statement ok -drop table if exists random_source; - -statement ok -create table test_order(id bigint, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint, s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar, d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) CLUSTER BY(to_yyyymmdd(insert_time), id) bloom_index_columns='insert_time,id'; - -statement ok -create table random_source(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) Engine = Random; - -statement ok -merge into test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from random_source limit 1) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; - -## test update list #13297 -statement ok -create table t11(a int,b string, c string); - -statement ok -create table t12(a int,b string, c string); - -statement ok -insert into t11 values(1,'b1','c1'),(2,'b2','c2'); - -statement ok -insert into t12 values(1,'b_5','c_5'),(3,'b_6','c_6'); - -statement error 1065 -merge into t11 using (select a, c from t12) as t12 on t11.a = t12.a when matched and max(t11.a) > 0 then update set c = t12.c; - -statement error 1065 -merge into t11 using (select a, c from t12) as t12 on t11.a = t12.a when matched then update set c = count(*); - -## test issue #13287 -statement ok -create 
table tt1 (a int, b int); - -statement error 1065 -merge into tt1 using(select 10, 20) as tt2 on tt1.a = 1 when not matched and tt1.b = 2 then insert values (10, 20); - -query TT -merge into tt1 using(select 10 as a, 20 as b) as tt2 on tt1.a = 1 when not matched and tt2.b = 2 then insert values (10, 20); ----- -0 - -query T -select count(*) from tt1; ----- -0 - -## test issue #13367 -statement ok -create table tt2(a bool, b variant, c map(string, string)); - -statement ok -insert into tt2 values (true, '10', {'k1':'v1'}), (false, '20', {'k2':'v2'}) - -query T -merge into tt2 using(select true as x) as t on (x and tt2.a) when matched and tt2.a then update set tt2.b = parse_json('30'); ----- -1 - -query TTT -select a, b, c from tt2 order by b; ----- -0 20 {'k2':'v2'} -1 30 {'k1':'v1'} - -## add test: source is table -statement ok -drop table if exists t1; - -statement ok -drop table if exists t2; - -statement ok -create table t1(a int); - -statement ok -create table t2(a int); - -statement ok -insert into t1 values(1); - -statement ok -insert into t2 values(1),(2); - -query TT -merge into t1 using t2 on t1.a = t2.a when matched then delete when not matched then insert *; ----- -1 1 - -query T -select * from t1; ----- -2 - -statement ok -drop table if exists t1; - -statement ok -drop table if exists t2; - -statement ok -create table t1(b int); - -statement ok -create table t2(a int); - -statement ok -insert into t1 values(1); - -statement ok -insert into t2 values(1),(2); - -statement error 1065 -merge into t1 using t2 on t1.a = t2.a when matched then delete when not matched then insert *; - -## add more multi matched statement test -statement ok -drop table if exists t1; - -statement ok -drop table if exists t2; - -statement ok -create table t1(a int,b string,c bool); - -statement ok -create table t2(a int,b string,c bool); - -statement ok -insert into t1 values(1,'a1',true),(2,'a2',false),(3,'a3',true); - -statement ok -insert into t2 values(1,'b1',true),(2,'b2',false),(3,'b3',true); - -query TTT -select * from t1; ----- -1 a1 1 -2 a2 0 -3 a3 1 - -query TTT -select * from t2; ----- -1 b1 1 -2 b2 0 -3 b3 1 - -query TT -merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 1 then delete when matched and t1.a = 2 then update * when matched and t1.a = 3 then delete; ----- -1 2 - -query TTT -select * from t1; ----- -2 b2 0 - -query T -merge into t1 using t2 on t1.a = t2.a when matched then delete; ----- -1 - -query T -select count(*) from t1; ----- -0 - -statement ok -insert into t1 values(1,'a1',true),(2,'a2',false),(3,'a3',true); - -query TT -merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 2 then update * when matched and t1.a = 1 then delete when matched and t1.a = 3 then update *; ----- -2 1 - -query TTT -select * from t1; ----- -2 b2 0 -3 b3 1 - -## issue 13454 -statement ok -drop table if exists tt1; - -statement ok -create table tt1(a bool, b int); - -statement ok -insert into tt1 values (true, 1), (false, 2); - -query T -merge into tt1 using (select 1 as x) as tt2 on (2 > 1) when matched and a then delete; ----- -1 - -query TT -select * from tt1; ----- -0 2 - -## issue #13298 -statement ok -drop table if exists t11; - -statement ok -drop table if exists t12; - -statement ok -create table t12 (a int, b int); - -statement ok -create table t11 (a int, b int); - -statement ok -insert into t11 values (1, 10),(2, 20),(3, 30),(4, 40); - -statement ok -insert into t12 values (1, 10),(2, 20),(3, 30),(4, 40); - -query T -MERGE INTO t11 USING(SELECT NULL AS c0 FROM t12) AS t12 
ON (t11.a OR TRUE) WHEN MATCHED AND TRUE THEN DELETE; ----- -4 - -query T -select count(*) from t11; ----- -0 - -## test issue #13732 -statement ok -CREATE TABLE orders CLUSTER BY (to_yyyymmddhh(created_at), user_id) AS SELECT - number % 5000 AS order_id, - number % 10000 AS user_id, - CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' - ELSE 'sell' - END AS order_type, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' - WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' - ELSE 'XRP' - END AS asset_type, - (rand() * 100)::decimal(18, 8) AS quantity, - (rand() * 1000)::decimal(18, 8) AS price, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' - WHEN (rand() * 10)::int % 3 = 1 THEN 'pending' - ELSE 'cancelled' - END AS status, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at -FROM numbers(5000); - -statement ok -MERGE INTO orders USING -( - SELECT - number % 5000 AS order_id, - number % 100000 AS user_id, - CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' - ELSE 'sell' - END AS order_type, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' - WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' - ELSE 'XRP' - END AS asset_type, - (rand() * 100)::decimal(18, 8) AS quantity, - (rand() * 1000)::decimal(18, 8) AS price, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' - WHEN (rand() * 10)::int % 3 = 1 THEN 'pending' - ELSE 'cancelled' - END AS status, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at - FROM numbers(5000) -) AS source -ON orders.order_id = source.order_id -WHEN MATCHED THEN - UPDATE SET - orders.user_id = source.user_id, - orders.order_type = source.order_type, - orders.asset_type = source.asset_type, - orders.quantity = source.quantity, - orders.price = source.price, - orders.status = source.status, - orders.created_at = source.created_at, - orders.updated_at = source.updated_at -WHEN NOT MATCHED THEN - INSERT (order_id, user_id, order_type, asset_type, quantity, price, status, created_at, updated_at) - VALUES (source.order_id, source.user_id, source.order_type, source.asset_type, source.quantity, source.price, source.status, source.created_at, source.updated_at); - -## test issue #13733 -statement ok -CREATE TABLE transactions CLUSTER BY (to_yyyymmddhh(transaction_time), user_id) AS SELECT - number % 1000000 AS transaction_id, - number % 100000 AS user_id, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'deposit' - WHEN (rand() * 10)::int % 3 = 1 THEN 'withdrawal' - ELSE 'trade' -END AS transaction_type, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' - WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' - ELSE 'XRP' -END AS asset_type, - (rand() * 100)::decimal(18, 8) AS quantity, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS transaction_time -FROM numbers(1000000); - -statement ok -MERGE INTO orders AS tt USING -( - SELECT - CASE - WHEN number % 2 = 0 THEN (number / 2) % 250000 - ELSE (SELECT MAX(order_id) FROM orders) + number + 1 - END AS order_id, - number % 100000 AS user_id, - CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' - ELSE 'sell' - END AS order_type, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' - WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' - ELSE 'XRP' - END AS asset_type, - (rand() * 100)::decimal(18, 8) AS quantity, - (rand() * 1000)::decimal(18, 8) AS price, - CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' - WHEN (rand() * 10)::int % 3 = 1 THEN 
'pending' - ELSE 'cancelled' - END AS status, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, - date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at, - CASE WHEN number % 2 = 0 THEN false ELSE true END AS is_delete - FROM numbers(5000) -) AS ss -ON (tt.user_id = ss.user_id AND tt.asset_type = ss.asset_type) -WHEN MATCHED AND ss.is_delete = true THEN - DELETE -WHEN MATCHED AND ss.is_delete = false THEN - UPDATE * WHEN NOT MATCHED THEN - INSERT *; - -## unsupport complex exprs for now. -## #13798 we need to support non-correlated-subquery for unmatched values exprs -statement error 1065 -MERGE INTO orders USING ( - SELECT t.user_id, t.asset_type, 'buy' AS synthetic_order_type, SUM(t.quantity) AS total_quantity, today() AS synthetic_date - FROM transactions t - WHERE t.transaction_type = 'deposit' - GROUP BY t.user_id, t.asset_type - HAVING SUM(t.quantity) > 100 -) AS synthetic_orders ON orders.user_id = synthetic_orders.user_id AND orders.asset_type = synthetic_orders.asset_type -WHEN NOT MATCHED THEN - INSERT (order_id, user_id, order_type, asset_type, quantity, price, status, created_at, updated_at) - VALUES ((SELECT MAX(order_id) FROM orders) + 1, synthetic_orders.user_id, synthetic_orders.synthetic_order_type, synthetic_orders.asset_type, synthetic_orders.total_quantity, 0, 'pending', synthetic_orders.synthetic_date, synthetic_orders.synthetic_date); - -## issue #13810: rewrite rule test -statement ok -DROP TABLE IF EXISTS orders; - -statement ok -CREATE TABLE orders ( - order_id INT NOT NULL, - user_id INT NOT NULL, - order_type VARCHAR NOT NULL, - asset_type VARCHAR NOT NULL, - quantity DECIMAL(18,8) NOT NULL, - price DECIMAL(18,8) NOT NULL, - status VARCHAR NOT NULL, - created_at DATE NOT NULL, - updated_at DATE NOT NULL -) row_per_block=5113; - -statement ok -insert into orders values(200007,7,'buy','BTC',4.81412194,48.14121943,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200015,15,'buy','BTC',3.78463552,37.84635523,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200019,19,'buy','BTC',1.61186913,16.11869132,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200031,31,'buy','BTC',3.99013730,39.90137297,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200047,47,'buy','BTC',0.98841829,9.88418289,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200077,77,'buy','BTC',2.07360391,20.73603908,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200087,87,'sell','ETH',9.64567442,96.45674419,'pending',to_date('2021-01-01'),to_date('2021-01-01')), -(200095,95,'buy','BTC',2.26686563,22.66865634,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200098,98,'buy','BTC',1.37252960,13.72529599,'completed',to_date('2021-01-01'),to_date('2021-01-01')), -(200102,102,'buy','BTC',1.53596481,15.35964815,'completed',to_date('2021-01-01'),to_date('2021-01-01')); - -query T -MERGE INTO orders USING ( - SELECT o.order_id, o.user_id, o.order_type, o.asset_type, o.quantity + a.avg_quantity AS new_quantity, o.price, o.status, o.created_at, o.updated_at - FROM orders o - INNER JOIN ( - SELECT user_id, asset_type, sum(quantity) AS avg_quantity - FROM orders - GROUP BY user_id, asset_type - ) a ON o.user_id = a.user_id AND o.asset_type = a.asset_type -) AS joined_data ON orders.order_id = joined_data.order_id - WHEN MATCHED THEN - UPDATE SET orders.quantity = joined_data.new_quantity; ----- -10 - -query TTTT -SELECT SUM(quantity) AS total_quantity, - AVG(quantity) AS average_quantity, - 
MIN(quantity) AS min_quantity, - MAX(quantity) AS max_quantity -FROM orders; ----- -64.16764110 6.416764110000 1.97683658 19.29134884 - -statement ok -create table tb_01 (id int,c1 varchar,c2 datetime(0),c3 json); - -statement ok -create table tmp_01 like tb_01; - -statement ok -insert into tmp_01 values(1,'abc',to_date('2023-11-29'),parse_json('{"a":1}')); - -query TT -merge into tb_01 as T using ( select * from tmp_01) as S on t.id = s.id when matched then update * when not matched then insert *; ----- -1 0 - -query TTT -select id,c1,to_date(c2),c3 from tb_01; ----- -1 abc 2023-11-29 {"a":1} +include ./09_0036_merge_into_without_distributed_enable.test statement ok set enable_distributed_merge_into = 0; - -statement ok -set enable_experimental_merge_into = 0; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into_separate_pipeline.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into_separate_pipeline.test index 6bc08cfbd8cd..a22bfe7b494f 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into_separate_pipeline.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0026_merge_into_separate_pipeline.test @@ -1,96 +1,7 @@ -statement ok -set enable_experimental_merge_into = 1; - statement ok set enable_distributed_merge_into = 1; -statement ok -drop table if exists t1_separate; - -statement ok -drop table if exists t2_separate; - -statement ok -create table t1_separate(a int,b string, c string); - -statement ok -create table t2_separate(a int,b string, c string); - -statement ok -insert into t1_separate values(1,'a1','b1'),(2,'a2','b2'); - -statement ok -insert into t1_separate values(3,'a3','b3'),(4,'a4','b4'); - -statement ok -insert into t2_separate values(1,'a5','b5'),(3,'a6','b6'); - -statement ok -insert into t2_separate values(8,'a8','b8'),(9,'a9','b9'); - -query TTT -select * from t1_separate order by a,b,c; ----- -1 a1 b1 -2 a2 b2 -3 a3 b3 -4 a4 b4 - -query TTT -select * from t2_separate order by a,b,c; ----- -1 a5 b5 -3 a6 b6 -8 a8 b8 -9 a9 b9 - -## 1.test matched only -statement ok -merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when matched and t2.b = 'a5' then update * when matched then update *; - -query TTT -select * from t1_separate order by a,b,c; ----- -1 a5 b5 -2 a2 b2 -3 a6 b6 -4 a4 b4 - -## 2.test insert only -statement ok -merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert * when not matched then insert *; - -query TTT -select * from t1_separate order by a,b,c; ----- -1 a5 b5 -2 a2 b2 -3 a6 b6 -4 a4 b4 -8 a8 b8 -9 a9 b9 - -## insert only semantic test -statement error 1065 -merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert (b) values(t1.a); - -statement ok -truncate table t1_separate; - -## test merge into empty table for insert-only -statement ok -merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert * when not matched then insert *; - -query TTT -select * from t1_separate order by a,b,c; ----- -1 a5 b5 -3 a6 b6 -8 a8 b8 -9 a9 b9 - -statement ok -set enable_experimental_merge_into = 0; +include ./09_0035_merge_into_separate_pipeline_without_distributed_enable.test statement ok set enable_distributed_merge_into = 0; \ No newline at end of file diff --git 
a/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test index 4045faeda154..0614e1a138f5 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0028_distributed_merge_into.test @@ -1,172 +1,7 @@ -statement ok -set enable_experimental_merge_into = 1; - statement ok set enable_distributed_merge_into = 1; -## distributed test -statement ok -drop table if exists distributed_target_test; - -statement ok -drop table if exists distributed_source_test; - -statement ok -create table distributed_target_test(a int,b string); - -## multi blocks -statement ok -insert into distributed_target_test values(1,'a'),(2,'b'),(3,'c'); - -statement ok -insert into distributed_target_test values(1,'a'),(2,'b'),(3,'c'); - -statement ok -insert into distributed_target_test values(6,'z'),(7,'y'),(8,'x'); - -statement ok -insert into distributed_target_test values(6,'z'),(7,'y'),(8,'x'); - -query TT -select * from distributed_target_test order by a; ----- -1 a -1 a -2 b -2 b -3 c -3 c -6 z -6 z -7 y -7 y -8 x -8 x - -statement ok -create table distributed_source_test(a int,b string,is_databend_deleted bool); - -statement ok -insert into distributed_source_test values(1,'d',true),(2,'e',true),(3,'f',false),(4,'e',true),(5,'f',false); - -statement ok -insert into distributed_source_test values(6,'h',true),(7,'i',true),(8,'j',false),(9,'k',true),(10,'l',false); - -statement ok -create stage distributed_source_parquet file_format = (type = parquet); - -statement ok -remove @distributed_source_parquet; - -statement ok -copy into @distributed_source_parquet from (select * from distributed_source_test); - -query TTT -merge into `distributed_target_test` as tt using (select `a`,`b`,`is_databend_deleted` from @distributed_source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) -when matched and ss.`is_databend_deleted` = true then delete when matched then update * when not matched and ss.`is_databend_deleted` = false then insert *; ----- -2 4 8 - -query TT -select * from distributed_target_test order by a; ----- -3 f -3 f -5 f -8 j -8 j -10 l - -## corner case test -## when a node matched all source, it will give a empty block, -## so in this case, we should not insert anything. 
-statement ok -drop table if exists corner_target_table; - -statement ok -create table corner_target_table(a int,b string,c string); - -statement ok -drop table if exists corner_source_table; - -statement ok -create table corner_source_table(a int,b string,c string); - -## add block1 -statement ok -insert into corner_target_table values(1,'a1','b1'),(2,'a2','b2'); - -## add block2 -statement ok -insert into corner_target_table values(1,'a3','b3'),(2,'a4','b4'),(3,'a5','b5'); - -## add two blocks for corner_source_table -statement ok -insert into corner_source_table values(1,'a6','b6'),(2,'a7','b7'); - -statement ok -insert into corner_source_table values(3,'a8','b8'); - -query TTT -select * from corner_target_table order by a,b; ----- -1 a1 b1 -1 a3 b3 -2 a2 b2 -2 a4 b4 -3 a5 b5 - -query TTT -select * from corner_source_table order by a,b; ----- -1 a6 b6 -2 a7 b7 -3 a8 b8 - -query TTT -merge into corner_target_table as t1 using (select * from corner_source_table) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; ----- -0 5 - -query TTT -select * from corner_target_table order by a,b; ----- -1 a6 b6 -1 a6 b6 -2 a7 b7 -2 a7 b7 -3 a8 b8 - -## test wide-table -statement ok -drop table if exists distributed_test_order; - -statement ok -drop table if exists distributed_random_source; - -statement ok -create table distributed_test_order(id bigint, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint, s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar, d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) CLUSTER BY(to_yyyymmdd(insert_time), id) bloom_index_columns='insert_time,id'; - -statement ok -create table distributed_random_source(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) Engine = Random; - -statement ok -create table distributed_random_store(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int); - -statement ok -insert into distributed_random_store (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_source limit 10); - 
-statement ok -insert into distributed_random_store (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_source limit 10); - -statement ok -merge into distributed_test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_store) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; - -statement ok -merge into distributed_test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_store) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; - -statement ok -set enable_experimental_merge_into = 0; +include ./09_0033_distributed_merge_into_without_enable.test statement ok -set enable_distributed_merge_into = 0; \ No newline at end of file +set enable_distributed_merge_into = 0; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test index fbb2fdc54eb4..1d6d37689af3 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0032_pr13848.test @@ -1,118 +1,7 @@ -## test window,agg,join's correctess -statement ok -set enable_experimental_merge_into = 1; - statement ok set enable_distributed_merge_into = 1; -statement ok -create table merge_target_0(a int,b string); - -statement ok -create table merge_source_0(a int,b string); - -statement ok -insert into merge_target_0 values(1,'a1'),(2,'b1'); - -statement ok -insert into merge_target_0 values(3,'a2'),(4,'b2'); - -statement ok -insert into merge_source_0 values(1,'a3'),(3,'b3'); - -statement ok -insert into merge_source_0 values(5,'a4'),(6,'b6'); - -## test window,agg,join -## 1. join test -query TT -merge into merge_target_0 as t1 using -(select t2.a,t3.b from merge_source_0 as t2 inner join merge_source_0 as t3 on t2.a = t3.a) as t4 -on t4.a = t1.a when matched then update * when not matched then insert *; ----- -2 2 - -query TT -select * from merge_target_0 order by a,b; ----- -1 a3 -2 b1 -3 b3 -4 b2 -5 a4 -6 b6 - -statement ok -truncate table merge_source_0; - -statement ok -insert into merge_source_0 values(1,'c7'),(3,'c7'); - -query TT -select * from merge_source_0 order by a,b; ----- -1 c7 -3 c7 - -## 2. agg test -query TT -merge into merge_target_0 as t1 using (select avg(a) as a,b from merge_source_0 group by b) as t2 on t1.a = t2.a -when matched then update * when not matched then insert *; ----- -0 1 - -query TT -select * from merge_target_0 order by a,b; ----- -1 a3 -2 c7 -3 b3 -4 b2 -5 a4 -6 b6 - -## 2. 
window func test -query TT -merge into merge_target_0 as t1 using (select row_number() OVER (PARTITION BY b ORDER BY a) as a,'d1' as b from merge_source_0) as t2 on t1.a = t2.a -when matched then update * when not matched then insert *; ----- -0 2 - -query TT -select * from merge_target_0 order by a,b; ----- -1 d1 -2 d1 -3 b3 -4 b2 -5 a4 -6 b6 - -### test copy into table unsupport -statement ok -create table copy_table_test0(a int,b string); - -statement ok -create stage parquet_table0 FILE_FORMAT = (TYPE = PARQUET); - -statement ok -copy into @parquet_table0 from (select a,b from merge_source_0 limit 2) - -## test agg -statement error 1065 -copy into copy_table_test0 from (select avg($1) as a,'b' as b from @parquet_table0); - -## test window -statement error 1065 -copy into copy_table_test0 from (select row_number() OVER (PARTITION BY b ORDER BY a) as a,'d1' as b from @parquet_table0); - -## test join, but it's not checked as `copy into table source can't contain window|aggregate|udf|join functions` -## it's `query as source of copy only allow projection on one stage table.`, because it always support only one table. -statement error 1005 -copy into copy_table_test0 from (select t2.a,t3.b from @parquet_table0 as t ,@parquet_table0 as t3 where t2.a = t3.a); +include ./09_0034_pr13848_without_distributed_enable.test statement ok set enable_distributed_merge_into = 0; - -statement ok -set enable_experimental_merge_into = 0; \ No newline at end of file diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0033_distributed_merge_into_without_enable.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0033_distributed_merge_into_without_enable.test new file mode 100644 index 000000000000..9116e5107178 --- /dev/null +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0033_distributed_merge_into_without_enable.test @@ -0,0 +1,223 @@ +## we need to test without enable_distributed_merge_into in cluster environment. 
+statement ok +set enable_experimental_merge_into = 1; + +## distributed test +statement ok +drop table if exists distributed_target_test; + +statement ok +drop table if exists distributed_source_test; + +statement ok +create table distributed_target_test(a int,b string); + +## multi blocks +statement ok +insert into distributed_target_test values(1,'a'),(2,'b'),(3,'c'); + +statement ok +insert into distributed_target_test values(1,'a'),(2,'b'),(3,'c'); + +statement ok +insert into distributed_target_test values(6,'z'),(7,'y'),(8,'x'); + +statement ok +insert into distributed_target_test values(6,'z'),(7,'y'),(8,'x'); + +query TT +select * from distributed_target_test order by a; +---- +1 a +1 a +2 b +2 b +3 c +3 c +6 z +6 z +7 y +7 y +8 x +8 x + +statement ok +create table distributed_source_test(a int,b string,is_databend_deleted bool); + +statement ok +insert into distributed_source_test values(1,'d',true),(2,'e',true),(3,'f',false),(4,'e',true),(5,'f',false); + +statement ok +insert into distributed_source_test values(6,'h',true),(7,'i',true),(8,'j',false),(9,'k',true),(10,'l',false); + +statement ok +create stage distributed_source_parquet file_format = (type = parquet); + +statement ok +remove @distributed_source_parquet; + +statement ok +copy into @distributed_source_parquet from (select * from distributed_source_test); + +query TTT +merge into `distributed_target_test` as tt using (select `a`,`b`,`is_databend_deleted` from @distributed_source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) +when matched and ss.`is_databend_deleted` = true then delete when matched then update * when not matched and ss.`is_databend_deleted` = false then insert *; +---- +2 4 8 + +query TT +select * from distributed_target_test order by a; +---- +3 f +3 f +5 f +8 j +8 j +10 l + +## corner case test +## when a node matched all source, it will give a empty block, +## so in this case, we should not insert anything. 
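+## In the setup below every source row (a = 1, 2, 3) matches at least one target row, so the corner-case MERGE is expected to update all five target rows and insert nothing.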
+statement ok +drop table if exists corner_target_table; + +statement ok +create table corner_target_table(a int,b string,c string); + +statement ok +drop table if exists corner_source_table; + +statement ok +create table corner_source_table(a int,b string,c string); + +## add block1 +statement ok +insert into corner_target_table values(1,'a1','b1'),(2,'a2','b2'); + +## add block2 +statement ok +insert into corner_target_table values(1,'a3','b3'),(2,'a4','b4'),(3,'a5','b5'); + +## add two blocks for corner_source_table +statement ok +insert into corner_source_table values(1,'a6','b6'),(2,'a7','b7'); + +statement ok +insert into corner_source_table values(3,'a8','b8'); + +query TTT +select * from corner_target_table order by a,b; +---- +1 a1 b1 +1 a3 b3 +2 a2 b2 +2 a4 b4 +3 a5 b5 + +query TTT +select * from corner_source_table order by a,b; +---- +1 a6 b6 +2 a7 b7 +3 a8 b8 + +query TTT +merge into corner_target_table as t1 using (select * from corner_source_table) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +0 5 + +query TTT +select * from corner_target_table order by a,b; +---- +1 a6 b6 +1 a6 b6 +2 a7 b7 +2 a7 b7 +3 a8 b8 + +## test wide-table +statement ok +drop table if exists distributed_test_order; + +statement ok +drop table if exists distributed_random_source; + +statement ok +create table distributed_test_order(id bigint, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint, s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar, d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) CLUSTER BY(to_yyyymmdd(insert_time), id) bloom_index_columns='insert_time,id'; + +statement ok +create table distributed_random_source(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) Engine = Random; + +statement ok +create table distributed_random_store(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int); + +statement ok +insert into distributed_random_store (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_source limit 10); + 
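+## A second batch is staged next, then the same MERGE runs twice against distributed_random_store: the first pass inserts every store row into distributed_test_order, and the second should hit the matched branch and update those rows in place.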
+statement ok +insert into distributed_random_store (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_source limit 10); + +statement ok +merge into distributed_test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_store) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; + +statement ok +merge into distributed_test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from distributed_random_store) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; + +statement ok +create table orders2(a int,b string,c string); + +statement ok +insert into orders2 values(1,'a1','b1'),(2,'a2','b2'),(3,'a3','b3'); + +statement ok +insert into orders2 values(1,'a1','b1'),(2,'a2','b2'),(3,'a3','b3'); + +statement error 4001 +MERGE INTO orders2 USING ( + SELECT o.a,o.b + FROM orders2 o + INNER JOIN ( + SELECT a, b, AVG(a) AS avg_quantity + FROM orders2 + GROUP BY a, b + ) a ON o.a = a.a AND o.b = a.b +) AS joined_data ON orders2.a = joined_data.a + WHEN MATCHED THEN + UPDATE SET orders2.b = joined_data.b; + +statement ok +truncate table orders2; + +statement ok +insert into orders2 values(1,'a1','b1'); + +statement ok +insert into orders2 values(2,'a2','b2'); + +statement ok +insert into orders2 values(3,'a3','b3'); + +query T +MERGE INTO orders2 USING ( + SELECT o.a,o.b,a.avg_quantity + FROM orders2 o + INNER JOIN ( + SELECT a, b, AVG(a)+1 AS avg_quantity + FROM orders2 + GROUP BY a, b + ) a ON o.a = a.a AND o.b = a.b +) AS joined_data ON orders2.a = joined_data.a + WHEN MATCHED THEN + UPDATE SET orders2.b = joined_data.b,orders2.a = avg_quantity; +---- +3 + +query TTT +select * from orders2 order by a,b,c; +---- +2 a1 b1 +3 a2 b2 +4 a3 b3 + +statement ok +set enable_experimental_merge_into = 0; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0034_pr13848_without_distributed_enable.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0034_pr13848_without_distributed_enable.test new file mode 100644 index 000000000000..a8237fab04c2 --- /dev/null +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0034_pr13848_without_distributed_enable.test @@ -0,0 +1,112 @@ +## test window,agg,join's correctess +statement ok +set enable_experimental_merge_into = 1; + +statement ok +create table merge_target_0(a int,b string); + +statement ok +create table merge_source_0(a int,b string); + +statement ok +insert into merge_target_0 values(1,'a1'),(2,'b1'); + +statement ok +insert into merge_target_0 values(3,'a2'),(4,'b2'); + +statement ok +insert into merge_source_0 values(1,'a3'),(3,'b3'); + +statement ok +insert into merge_source_0 values(5,'a4'),(6,'b6'); + +## test window,agg,join +## 1. 
join test +query TT +merge into merge_target_0 as t1 using +(select t2.a,t3.b from merge_source_0 as t2 inner join merge_source_0 as t3 on t2.a = t3.a) as t4 +on t4.a = t1.a when matched then update * when not matched then insert *; +---- +2 2 + +query TT +select * from merge_target_0 order by a,b; +---- +1 a3 +2 b1 +3 b3 +4 b2 +5 a4 +6 b6 + +statement ok +truncate table merge_source_0; + +statement ok +insert into merge_source_0 values(1,'c7'),(3,'c7'); + +query TT +select * from merge_source_0 order by a,b; +---- +1 c7 +3 c7 + +## 2. agg test +query TT +merge into merge_target_0 as t1 using (select avg(a) as a,b from merge_source_0 group by b) as t2 on t1.a = t2.a +when matched then update * when not matched then insert *; +---- +0 1 + +query TT +select * from merge_target_0 order by a,b; +---- +1 a3 +2 c7 +3 b3 +4 b2 +5 a4 +6 b6 + +## 2. window func test +query TT +merge into merge_target_0 as t1 using (select row_number() OVER (PARTITION BY b ORDER BY a) as a,'d1' as b from merge_source_0) as t2 on t1.a = t2.a +when matched then update * when not matched then insert *; +---- +0 2 + +query TT +select * from merge_target_0 order by a,b; +---- +1 d1 +2 d1 +3 b3 +4 b2 +5 a4 +6 b6 + +### test copy into table unsupport +statement ok +create table copy_table_test0(a int,b string); + +statement ok +create stage parquet_table0 FILE_FORMAT = (TYPE = PARQUET); + +statement ok +copy into @parquet_table0 from (select a,b from merge_source_0 limit 2) + +## test agg +statement error 1065 +copy into copy_table_test0 from (select avg($1) as a,'b' as b from @parquet_table0); + +## test window +statement error 1065 +copy into copy_table_test0 from (select row_number() OVER (PARTITION BY b ORDER BY a) as a,'d1' as b from @parquet_table0); + +## test join, but it's not checked as `copy into table source can't contain window|aggregate|udf|join functions` +## it's `query as source of copy only allow projection on one stage table.`, because it always support only one table. 
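+## Expected below: error 1005 from the `query as source of copy only allow projection on one stage table` rule, not the window/aggregate restriction quoted above.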
+statement error 1005 +copy into copy_table_test0 from (select t2.a,t3.b from @parquet_table0 as t ,@parquet_table0 as t3 where t2.a = t3.a); + +statement ok +set enable_experimental_merge_into = 0; \ No newline at end of file diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0035_merge_into_separate_pipeline_without_distributed_enable.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0035_merge_into_separate_pipeline_without_distributed_enable.test new file mode 100644 index 000000000000..939de478f5c0 --- /dev/null +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0035_merge_into_separate_pipeline_without_distributed_enable.test @@ -0,0 +1,96 @@ +statement ok +set enable_experimental_merge_into = 1; + +statement ok +drop table if exists t1_separate; + +statement ok +drop table if exists t2_separate; + +statement ok +create table t1_separate(a int,b string, c string); + +statement ok +create table t2_separate(a int,b string, c string); + +statement ok +insert into t1_separate values(1,'a1','b1'),(2,'a2','b2'); + +statement ok +insert into t1_separate values(3,'a3','b3'),(4,'a4','b4'); + +statement ok +insert into t2_separate values(1,'a5','b5'),(3,'a6','b6'); + +statement ok +insert into t2_separate values(8,'a8','b8'),(9,'a9','b9'); + +query TTT +select * from t1_separate order by a,b,c; +---- +1 a1 b1 +2 a2 b2 +3 a3 b3 +4 a4 b4 + +query TTT +select * from t2_separate order by a,b,c; +---- +1 a5 b5 +3 a6 b6 +8 a8 b8 +9 a9 b9 + +## 1.test matched only +query T +merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when matched and t2.b = 'a5' then update * when matched then update *; +---- +2 + +query TTT +select * from t1_separate order by a,b,c; +---- +1 a5 b5 +2 a2 b2 +3 a6 b6 +4 a4 b4 + +## 2.test insert only +query T +merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert * when not matched then insert *; +---- +2 + +query TTT +select * from t1_separate order by a,b,c; +---- +1 a5 b5 +2 a2 b2 +3 a6 b6 +4 a4 b4 +8 a8 b8 +9 a9 b9 + +## insert only semantic test +statement error 1065 +merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert (b) values(t1.a); + +statement ok +truncate table t1_separate; + +## test merge into empty table for insert-only +query T +merge into t1_separate as t1 using (select * from t2_separate) as t2 on t1.a = t2.a when not matched and t2.b = 'a5' then insert * when not matched then insert *; +---- +4 + +query TTT +select * from t1_separate order by a,b,c; +---- +1 a5 b5 +3 a6 b6 +8 a8 b8 +9 a9 b9 + +statement ok +set enable_experimental_merge_into = 0; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test new file mode 100644 index 000000000000..bea9bec5cffd --- /dev/null +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test @@ -0,0 +1,1028 @@ +statement ok +set enable_experimental_merge_into = 1; + +statement ok +drop table if exists t1; + +statement ok +drop table if exists t2; + +statement ok +create table t1(a int,b string, c string); + +statement ok +create table t2(a int,b string, c string); + +statement ok +insert into t1 values(1,'b1','c1'),(2,'b2','c2'); + +statement ok +insert into t1 values(2,'b3','c3'),(3,'b4','c4'); + +query TTT +select * from 
t1 order by a,b,c; +---- +1 b1 c1 +2 b2 c2 +2 b3 c3 +3 b4 c4 + +statement ok +insert into t2 values(1,'b_5','c_5'),(3,'b_6','c_6'); + +statement ok +insert into t2 values(2,'b_7','c_7'); + +query TTT +select * from t2 order by a,b,c; +---- +1 b_5 c_5 +2 b_7 c_7 +3 b_6 c_6 + +## test source alias +statement error 1005 +merge into t1 using (select * from t2 ) on t1.a = t2.a when matched then update set t1.c = t2.c,t1.c = t2.c; + +# section I: basic test for match and unmatch + +statement error 1006 +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c,t1.c = t2.c; + +query T +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c; +---- +4 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +2 b2 c_7 +2 b3 c_7 +3 b4 c_6 + +statement ok +insert into t2 values(4,'b_8','c_8'); + +query TTT +select * from t2 order by a,b,c; +---- +1 b_5 c_5 +2 b_7 c_7 +3 b_6 c_6 +4 b_8 c_8 + +query TT +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 4 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +2 b2 c_7 +2 b3 c_7 +3 b4 c_6 +4 b_8 c_8 + +statement ok +insert into t2 values(1,'b_9','c_9'); + +statement error 4001 +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update set t1.c = t2.c when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +2 b2 c_7 +2 b3 c_7 +3 b4 c_6 +4 b_8 c_8 + +statement ok +delete from t2 where a = 1; + +query TTT +select * from t2 order by a,b,c; +---- +2 b_7 c_7 +3 b_6 c_6 +4 b_8 c_8 + +statement ok +insert into t2 values(5,'b_9','c_9'); + +query TTT +select * from t2 order by a,b,c; +---- +2 b_7 c_7 +3 b_6 c_6 +4 b_8 c_8 +5 b_9 c_9 + +query TT +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; +---- +4 + +query ITT +select * from t1 order by a,b,c; +---- +1 b1 c_5 + +# section 2 multi clauses +statement ok +insert into t1 values(2,'b_1','c_1'),(3,'b_2','c_2'); + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +2 b_1 c_1 +3 b_2 c_2 + +statement error 1065 +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); + +query TTT +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t1.b = 'b_1' then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 1 1 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +3 b_2 c_6 +4 b_8 c_8 + +query TT +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when not matched and t2.c = 'c_9' then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 2 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +5 b_9 c_9 + +query T +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched and t2.c = 'c_8' then insert (a,b) values(t2.a,t2.b) when not matched and t2.c = 'c_7' then insert (a,c) values(t2.a,t2.c); +---- +2 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 +2 NULL c_7 +4 b_8 NULL +5 b_9 c_9 + +statement ok +insert into t2 values(5,'b_10','c_10'); + +query TTT +select * from t2 order by a,b,c; +---- +2 b_7 c_7 +3 b_6 c_6 +4 b_8 c_8 
+5 b_10 c_10 +5 b_9 c_9 + +statement error 4001 +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched and t2.c = 'c_9' then update set t1.b = 'b_11' when matched and t2.c = 'c_10' then delete; + +## idempotent delete test +query T +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete; +---- +3 + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c_5 + +## test star for merge into +statement ok +truncate table t1; + +statement ok +truncate table t2; + +query I +select count(*) from t1; +---- +0 + +query I +select count(*) from t2; +---- +0 + +statement ok +insert into t1 values(1,'b1','c1'),(2,'b2','c2'); + +query TTT +select * from t1 order by a,b,c; +---- +1 b1 c1 +2 b2 c2 + +statement ok +insert into t2 values(1,'b3','c3'),(3,'b4','c4'); + +query TTT +select * from t2 order by a,b,c; +---- +1 b3 c3 +3 b4 c4 + +## test insert columns mismatch +statement error 1065 +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when not matched then insert values(t2.a,t2.c); + +query TT +merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then update * when not matched then insert *; +---- +1 1 + + +query TTT +select * from t1 order by a,b,c; +---- +1 b3 c3 +2 b2 c2 +3 b4 c4 + +## test multi same name for star +statement error 1065 +merge into t1 using (select a,b,c,a from t2 ) as t2 on t1.a = t2.a when matched then update *; + +statement error 1065 +merge into t1 using (select a,b,c,a,b from t2 ) as t2 on t1.a = t2.a when not matched then insert *; + +## stage file test +statement ok +drop table if exists test_stage; + +statement ok +drop table if exists target_table; + +statement ok +create table target_table(a int,b string,c string); + +statement ok +insert into target_table values(1,'a_1','b_1'),(2,'a_2','b_2'); + +query TTT +select * from target_table order by a,b,c; +---- +1 a_1 b_1 +2 a_2 b_2 + +statement ok +create table test_stage(a int,b string,c string); + +statement ok +insert into test_stage values(1,'a1','b1'),(2,'a2','b2'),(3,'a3','b3'); + +query TTT +select * from test_stage order by a,b,c; +---- +1 a1 b1 +2 a2 b2 +3 a3 b3 + +statement ok +drop stage if exists s5_merge_into; + +statement ok +drop stage if exists s4_merge_into; + +statement ok +create stage s5_merge_into FILE_FORMAT = (TYPE = CSV); + +statement ok +remove @s5_merge_into; + +statement ok +copy into @s5_merge_into from (select a,b,c from test_stage order by a,b,c); + +query TTT +select $1,$2,$3 from @s5_merge_into order by $1,$2,$3; +---- +1 a1 b1 +2 a2 b2 +3 a3 b3 + +## test CSV +query TT +merge into target_table using (select $1,$2,$3 from @s5_merge_into) as cdc on cast(cdc.$1 as int) = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); +---- +1 2 + +query TTT +select * from target_table order by a,b,c; +---- +3 a3 b3 + +## test parquet +statement ok +truncate table target_table; + +query I +select count(*) from target_table; +---- +0 + +statement ok +create stage s4_merge_into FILE_FORMAT = (TYPE = PARQUET); + +statement ok +remove @s4_merge_into; + +statement ok +copy into @s4_merge_into from (select a,b,c from test_stage order by a,b,c); + +query TTT +select $1,$2,$3 from @s4_merge_into order by $1,$2,$3; +---- +1 a1 b1 +2 a2 b2 +3 a3 b3 + +statement ok +insert into target_table values(1,'a_1','b_1'),(2,'a_2','b_2'); + +query TTT +select * from target_table order by a,b,c; +---- +1 a_1 b_1 +2 a_2 b_2 + +query TT +merge into target_table using (select $1,$2,$3 from @s4_merge_into) 
as cdc on cdc.$1 = target_table.a when matched then delete when not matched then insert values(cdc.$1,cdc.$2,cdc.$3); +---- +1 2 + +query TTT +select * from target_table order by a,b,c; +---- +3 a3 b3 + +## NULL test, for join, if join_expr result is +## NULL, it will be treated as not macthed. +statement ok +truncate table t1; + +statement ok +truncate table t2; + +query I +select count(*) from t1; +---- +0 + +query I +select count(*) from t2; +---- +0 + +statement ok +insert into t1 values(NULL,'b_1','c_1'); + +query TTT +select * from t1 order by a,b,c; +---- +NULL b_1 c_1 + +statement ok +insert into t2 values(1,'b_4','c_4'),(2,'b_2','c_2'),(NULL,'b_3','c_3'); + +query TTT +select * from t2 order by a,b,c; +---- +1 b_4 c_4 +2 b_2 c_2 +NULL b_3 c_3 + +query TT +merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete when not matched then insert *; +---- +3 0 + +query TTT +select * from t1 order by a,b,c; +---- +1 b_4 c_4 +2 b_2 c_2 +NULL b_1 c_1 +NULL b_3 c_3 + +query T +merge into t1 using (select * from t2) as t2 on t1.a = t2.a when matched then delete; +---- +2 + +query TTT +select * from t1 order by a,b,c; +---- +NULL b_1 c_1 +NULL b_3 c_3 + +statement ok +truncate table t1; + +statement ok +truncate table t2; + +query I +select count(*) from t1; +---- +0 + +query I +select count(*) from t2; +---- +0 + +## test target table alias +statement ok +insert into t2 values(1,'a1','b1'); + +query TT +merge into t1 as t3 using (select * from t2 ) as t2 on t3.a = t2.a when not matched then insert (a,b,c) values(t2.a,t2.b,t2.c); +---- +1 + +query TTT +select * from t1 order by a,b,c; +---- +1 a1 b1 + +statement ok +CREATE TABLE employees (employee_id INT, employee_name VARCHAR(255),department VARCHAR(255)); + +statement ok +CREATE TABLE salaries (employee_id INT,salary DECIMAL(10, 2)); + +statement ok +INSERT INTO employees VALUES(1, 'Alice', 'HR'),(2, 'Bob', 'IT'),(3, 'Charlie', 'Finance'),(4, 'David', 'HR'); + +statement ok +INSERT INTO salaries VALUES(1, 50000.00),(2, 60000.00); + +query TT +MERGE INTO salaries USING (SELECT * FROM employees) as employees ON salaries.employee_id = employees.employee_id WHEN MATCHED AND employees.department = 'HR' THEN UPDATE SET salaries.salary = salaries.salary + 1000.00 WHEN MATCHED THEN UPDATE SET salaries.salary = salaries.salary + 500.00 WHEN NOT MATCHED THEN INSERT (employee_id, salary) VALUES (employees.employee_id, 55000.00); +---- +2 2 + +query TTT +select * from salaries order by employee_id; +---- +1 51000.00 +2 60500.00 +3 55000.00 +4 55000.00 + +## null cast bug fix +statement ok +create table t1_target(a int not null); + +statement ok +create table t2_source(a int not null); + +statement ok +insert into t1_target values(1); + +statement ok +insert into t2_source values(1),(2); + +query TT +merge into t1_target using (select * from t2_source) as t2_source on t1_target.a = t2_source.a when matched then update * when not matched then insert *; +---- +1 1 + +query T +select * from t1_target order by a; +---- +1 +2 + +## cluster table test +statement ok +create table cluster_target(a int,b string,c int) cluster by(a,b); + +statement ok +create table cluster_source(a int,b string,c int); + +statement ok +insert into cluster_source values(12,'b',1),(1,'a',2),(2,'b',3),(2,'a',4),(3,'a',3); + +## test update indetify error +statement error 1006 +merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when matched then update set cluster_target.a = t2.a; + +statement error 1006 +merge into 
cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when matched then update set t2.a = t2.a; + +query TT +merge into cluster_target as t1 using (select * from cluster_source) as t2 on t1.a = t2.a when not matched then insert *; +---- +5 + +# By default setting, all rows merged from `cluster_source` will be resident in a single block of `cluster_target`, +# as table `cluster_target` is clustered by `(a,b)`, the rows inside the one block are assumed to be sorted +# by `(a, b)`, consequently, the result of the following query should be ordered by `(a,b)` without an explicit +# `order by` clause. +query TTT +select * from cluster_target; +---- +1 a 2 +2 a 4 +2 b 3 +3 a 3 +12 b 1 + +## add more tests +statement ok +drop table if exists target_test; + +statement ok +drop table if exists source_test; + +statement ok +create table target_test(a int,b string); + +statement ok +insert into target_test values(1,'a'),(2,'b'),(3,'c'); + +statement ok +create table source_test(a int,b string,delete_flag bool); + +statement ok +insert into source_test values(1,'d',true),(2,'e',true),(3,'f',false),(4,'e',true),(5,'f',false); + +statement ok +create stage source_parquet file_format = (type = parquet); + +statement ok +remove @source_parquet; + +statement ok +copy into @source_parquet from (select * from source_test); + +query TTT +merge into `target_test` as tt using (select `a`,`b`,`delete_flag` from @source_parquet (pattern => '.*[.]parquet')) as ss on (ss.`a` = tt.`a`) +when matched and ss.`delete_flag` = true then delete when matched then update * when not matched and ss.`delete_flag` = false then insert *; +---- +1 1 2 + +query TT +select * from target_test order by a; +---- +3 f +5 f + +## test not match cast and predicate index +statement ok +drop table if exists test_order; + +statement ok +drop table if exists random_source; + +statement ok +create table test_order(id bigint, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint, s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar, d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) CLUSTER BY(to_yyyymmdd(insert_time), id) bloom_index_columns='insert_time,id'; + +statement ok +create table random_source(id bigint not null, id1 bigint, id2 bigint, id3 bigint, id4 bigint, id5 bigint, id6 bigint, id7 bigint,s1 varchar, s2 varchar, s3 varchar, s4 varchar, s5 varchar, s6 varchar, s7 varchar, s8 varchar, s9 varchar, s10 varchar, s11 varchar, s12 varchar, s13 varchar,d1 DECIMAL(20, 8), d2 DECIMAL(20, 8), d3 DECIMAL(20, 8), d4 DECIMAL(20, 8), d5 DECIMAL(20, 8), d6 DECIMAL(30, 8), d7 DECIMAL(30, 8), d8 DECIMAL(30, 8), d9 DECIMAL(30, 8), d10 DECIMAL(30, 8),insert_time datetime not null, insert_time1 datetime, insert_time2 datetime, insert_time3 datetime,i int) Engine = Random; + +statement ok +merge into test_order as t using (select id,34 as id1,238 as id2, id3, id4, id5, id6, id7,s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,d1, d2, d3, d4, d5, d6, d7, d8, d9, d10,insert_time,insert_time1,insert_time2,insert_time3,i from random_source limit 1) as s on t.id = s.id and t.insert_time = s.insert_time when matched then update * when not matched then insert *; 
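+## The single-row MERGE above covers the implicit casts needed by `update *` / `insert *` together with the cluster key and bloom_index_columns ('insert_time,id') declared on test_order.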
+ +## test update list #13297 +statement ok +create table t11(a int,b string, c string); + +statement ok +create table t12(a int,b string, c string); + +statement ok +insert into t11 values(1,'b1','c1'),(2,'b2','c2'); + +statement ok +insert into t12 values(1,'b_5','c_5'),(3,'b_6','c_6'); + +statement error 1065 +merge into t11 using (select a, c from t12) as t12 on t11.a = t12.a when matched and max(t11.a) > 0 then update set c = t12.c; + +statement error 1065 +merge into t11 using (select a, c from t12) as t12 on t11.a = t12.a when matched then update set c = count(*); + +## test issue #13287 +statement ok +create table tt1 (a int, b int); + +statement error 1065 +merge into tt1 using(select 10, 20) as tt2 on tt1.a = 1 when not matched and tt1.b = 2 then insert values (10, 20); + +query TT +merge into tt1 using(select 10 as a, 20 as b) as tt2 on tt1.a = 1 when not matched and tt2.b = 2 then insert values (10, 20); +---- +0 + +query T +select count(*) from tt1; +---- +0 + +## test issue #13367 +statement ok +create table tt2(a bool, b variant, c map(string, string)); + +statement ok +insert into tt2 values (true, '10', {'k1':'v1'}), (false, '20', {'k2':'v2'}) + +query T +merge into tt2 using(select true as x) as t on (x and tt2.a) when matched and tt2.a then update set tt2.b = parse_json('30'); +---- +1 + +query TTT +select a, b, c from tt2 order by b; +---- +0 20 {'k2':'v2'} +1 30 {'k1':'v1'} + +## add test: source is table +statement ok +drop table if exists t1; + +statement ok +drop table if exists t2; + +statement ok +create table t1(a int); + +statement ok +create table t2(a int); + +statement ok +insert into t1 values(1); + +statement ok +insert into t2 values(1),(2); + +query TT +merge into t1 using t2 on t1.a = t2.a when matched then delete when not matched then insert *; +---- +1 1 + +query T +select * from t1; +---- +2 + +statement ok +drop table if exists t1; + +statement ok +drop table if exists t2; + +statement ok +create table t1(b int); + +statement ok +create table t2(a int); + +statement ok +insert into t1 values(1); + +statement ok +insert into t2 values(1),(2); + +statement error 1065 +merge into t1 using t2 on t1.a = t2.a when matched then delete when not matched then insert *; + +## add more multi matched statement test +statement ok +drop table if exists t1; + +statement ok +drop table if exists t2; + +statement ok +create table t1(a int,b string,c bool); + +statement ok +create table t2(a int,b string,c bool); + +statement ok +insert into t1 values(1,'a1',true),(2,'a2',false),(3,'a3',true); + +statement ok +insert into t2 values(1,'b1',true),(2,'b2',false),(3,'b3',true); + +query TTT +select * from t1; +---- +1 a1 1 +2 a2 0 +3 a3 1 + +query TTT +select * from t2; +---- +1 b1 1 +2 b2 0 +3 b3 1 + +query TT +merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 1 then delete when matched and t1.a = 2 then update * when matched and t1.a = 3 then delete; +---- +1 2 + +query TTT +select * from t1; +---- +2 b2 0 + +query T +merge into t1 using t2 on t1.a = t2.a when matched then delete; +---- +1 + +query T +select count(*) from t1; +---- +0 + +statement ok +insert into t1 values(1,'a1',true),(2,'a2',false),(3,'a3',true); + +query TT +merge into t1 using t2 on t1.a = t2.a when matched and t1.a = 2 then update * when matched and t1.a = 1 then delete when matched and t1.a = 3 then update *; +---- +2 1 + +query TTT +select * from t1; +---- +2 b2 0 +3 b3 1 + +## issue 13454 +statement ok +drop table if exists tt1; + +statement ok +create table tt1(a bool, b int); + 
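+## issue 13454: the join condition `(2 > 1)` is constant, so both target rows are matched; only the row with a = true should be deleted, leaving only (false, 2).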
+statement ok +insert into tt1 values (true, 1), (false, 2); + +query T +merge into tt1 using (select 1 as x) as tt2 on (2 > 1) when matched and a then delete; +---- +1 + +query TT +select * from tt1; +---- +0 2 + +## issue #13298 +statement ok +drop table if exists t11; + +statement ok +drop table if exists t12; + +statement ok +create table t12 (a int, b int); + +statement ok +create table t11 (a int, b int); + +statement ok +insert into t11 values (1, 10),(2, 20),(3, 30),(4, 40); + +statement ok +insert into t12 values (1, 10),(2, 20),(3, 30),(4, 40); + +query T +MERGE INTO t11 USING(SELECT NULL AS c0 FROM t12) AS t12 ON (t11.a OR TRUE) WHEN MATCHED AND TRUE THEN DELETE; +---- +4 + +query T +select count(*) from t11; +---- +0 + +## test issue #13732 +statement ok +CREATE TABLE orders CLUSTER BY (to_yyyymmddhh(created_at), user_id) AS SELECT + number % 5000 AS order_id, + number % 10000 AS user_id, + CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' + ELSE 'sell' + END AS order_type, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' + WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' + ELSE 'XRP' + END AS asset_type, + (rand() * 100)::decimal(18, 8) AS quantity, + (rand() * 1000)::decimal(18, 8) AS price, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' + WHEN (rand() * 10)::int % 3 = 1 THEN 'pending' + ELSE 'cancelled' + END AS status, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at +FROM numbers(5000); + +statement ok +MERGE INTO orders USING +( + SELECT + number % 5000 AS order_id, + number % 100000 AS user_id, + CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' + ELSE 'sell' + END AS order_type, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' + WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' + ELSE 'XRP' + END AS asset_type, + (rand() * 100)::decimal(18, 8) AS quantity, + (rand() * 1000)::decimal(18, 8) AS price, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' + WHEN (rand() * 10)::int % 3 = 1 THEN 'pending' + ELSE 'cancelled' + END AS status, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at + FROM numbers(5000) +) AS source +ON orders.order_id = source.order_id +WHEN MATCHED THEN + UPDATE SET + orders.user_id = source.user_id, + orders.order_type = source.order_type, + orders.asset_type = source.asset_type, + orders.quantity = source.quantity, + orders.price = source.price, + orders.status = source.status, + orders.created_at = source.created_at, + orders.updated_at = source.updated_at +WHEN NOT MATCHED THEN + INSERT (order_id, user_id, order_type, asset_type, quantity, price, status, created_at, updated_at) + VALUES (source.order_id, source.user_id, source.order_type, source.asset_type, source.quantity, source.price, source.status, source.created_at, source.updated_at); + +## test issue #13733 +statement ok +CREATE TABLE transactions CLUSTER BY (to_yyyymmddhh(transaction_time), user_id) AS SELECT + number % 1000000 AS transaction_id, + number % 100000 AS user_id, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'deposit' + WHEN (rand() * 10)::int % 3 = 1 THEN 'withdrawal' + ELSE 'trade' +END AS transaction_type, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' + WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' + ELSE 'XRP' +END AS asset_type, + (rand() * 100)::decimal(18, 8) AS quantity, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS transaction_time +FROM 
numbers(1000000); + +statement ok +MERGE INTO orders AS tt USING +( + SELECT + CASE + WHEN number % 2 = 0 THEN (number / 2) % 250000 + ELSE (SELECT MAX(order_id) FROM orders) + number + 1 + END AS order_id, + number % 100000 AS user_id, + CASE WHEN (rand() * 10)::int % 2 = 0 THEN 'buy' + ELSE 'sell' + END AS order_type, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'BTC' + WHEN (rand() * 10)::int % 3 = 1 THEN 'ETH' + ELSE 'XRP' + END AS asset_type, + (rand() * 100)::decimal(18, 8) AS quantity, + (rand() * 1000)::decimal(18, 8) AS price, + CASE WHEN (rand() * 10)::int % 3 = 0 THEN 'completed' + WHEN (rand() * 10)::int % 3 = 1 THEN 'pending' + ELSE 'cancelled' + END AS status, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS created_at, + date_add('day', floor(rand() * 10 % 365)::int, '2021-01-01') AS updated_at, + CASE WHEN number % 2 = 0 THEN false ELSE true END AS is_delete + FROM numbers(5000) +) AS ss +ON (tt.user_id = ss.user_id AND tt.asset_type = ss.asset_type) +WHEN MATCHED AND ss.is_delete = true THEN + DELETE +WHEN MATCHED AND ss.is_delete = false THEN + UPDATE * WHEN NOT MATCHED THEN + INSERT *; + +## unsupport complex exprs for now. +## #13798 we need to support non-correlated-subquery for unmatched values exprs +statement error 1065 +MERGE INTO orders USING ( + SELECT t.user_id, t.asset_type, 'buy' AS synthetic_order_type, SUM(t.quantity) AS total_quantity, today() AS synthetic_date + FROM transactions t + WHERE t.transaction_type = 'deposit' + GROUP BY t.user_id, t.asset_type + HAVING SUM(t.quantity) > 100 +) AS synthetic_orders ON orders.user_id = synthetic_orders.user_id AND orders.asset_type = synthetic_orders.asset_type +WHEN NOT MATCHED THEN + INSERT (order_id, user_id, order_type, asset_type, quantity, price, status, created_at, updated_at) + VALUES ((SELECT MAX(order_id) FROM orders) + 1, synthetic_orders.user_id, synthetic_orders.synthetic_order_type, synthetic_orders.asset_type, synthetic_orders.total_quantity, 0, 'pending', synthetic_orders.synthetic_date, synthetic_orders.synthetic_date); + +## issue #13810: rewrite rule test +statement ok +DROP TABLE IF EXISTS orders; + +statement ok +CREATE TABLE orders ( + order_id INT NOT NULL, + user_id INT NOT NULL, + order_type VARCHAR NOT NULL, + asset_type VARCHAR NOT NULL, + quantity DECIMAL(18,8) NOT NULL, + price DECIMAL(18,8) NOT NULL, + status VARCHAR NOT NULL, + created_at DATE NOT NULL, + updated_at DATE NOT NULL +) row_per_block=5113; + +statement ok +insert into orders values(200007,7,'buy','BTC',4.81412194,48.14121943,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200015,15,'buy','BTC',3.78463552,37.84635523,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200019,19,'buy','BTC',1.61186913,16.11869132,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200031,31,'buy','BTC',3.99013730,39.90137297,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200047,47,'buy','BTC',0.98841829,9.88418289,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200077,77,'buy','BTC',2.07360391,20.73603908,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200087,87,'sell','ETH',9.64567442,96.45674419,'pending',to_date('2021-01-01'),to_date('2021-01-01')), +(200095,95,'buy','BTC',2.26686563,22.66865634,'completed',to_date('2021-01-01'),to_date('2021-01-01')), +(200098,98,'buy','BTC',1.37252960,13.72529599,'completed',to_date('2021-01-01'),to_date('2021-01-01')), 
+(200102,102,'buy','BTC',1.53596481,15.35964815,'completed',to_date('2021-01-01'),to_date('2021-01-01')); + +query T +MERGE INTO orders USING ( + SELECT o.order_id, o.user_id, o.order_type, o.asset_type, o.quantity + a.avg_quantity AS new_quantity, o.price, o.status, o.created_at, o.updated_at + FROM orders o + INNER JOIN ( + SELECT user_id, asset_type, sum(quantity) AS avg_quantity + FROM orders + GROUP BY user_id, asset_type + ) a ON o.user_id = a.user_id AND o.asset_type = a.asset_type +) AS joined_data ON orders.order_id = joined_data.order_id + WHEN MATCHED THEN + UPDATE SET orders.quantity = joined_data.new_quantity; +---- +10 + +query TTTT +SELECT SUM(quantity) AS total_quantity, + AVG(quantity) AS average_quantity, + MIN(quantity) AS min_quantity, + MAX(quantity) AS max_quantity +FROM orders; +---- +64.16764110 6.416764110000 1.97683658 19.29134884 + +statement ok +create table tb_01 (id int,c1 varchar,c2 datetime(0),c3 json); + +statement ok +create table tmp_01 like tb_01; + +statement ok +insert into tmp_01 values(1,'abc',to_date('2023-11-29'),parse_json('{"a":1}')); + +query TT +merge into tb_01 as T using ( select * from tmp_01) as S on t.id = s.id when matched then update * when not matched then insert *; +---- +1 0 + +query TTT +select id,c1,to_date(c2),c3 from tb_01; +---- +1 abc 2023-11-29 {"a":1} + +statement ok +set enable_experimental_merge_into = 0; From 6bc71243f412166870cbe664a7cfdd0ae31e87f3 Mon Sep 17 00:00:00 2001 From: Yang Xiufeng Date: Mon, 4 Dec 2023 23:00:44 +0800 Subject: [PATCH 09/16] feat: iceberg table engine. (#13835) * feat: iceberg table engine. * feat: iceberg table engine. * refactor: use table options. * show table options when 'show create table.' * fix clippy * rebase --- Cargo.lock | 2 + src/query/ast/src/ast/statements/table.rs | 2 + src/query/ast/src/parser/statement.rs | 1 + .../interpreters/interpreter_table_create.rs | 7 ++ .../interpreter_table_show_create.rs | 2 +- src/query/service/src/sessions/query_ctx.rs | 19 ++++- .../service/src/sessions/query_ctx_shared.rs | 8 ++ .../infer_schema/infer_schema_table.rs | 2 +- .../inspect_parquet/inspect_parquet_table.rs | 2 +- .../list_stage/list_stage_table.rs | 2 +- .../it/storages/testdata/engines_table.txt | 21 ++--- src/query/sql/Cargo.toml | 1 + .../src/planner/binder/copy_into_location.rs | 2 +- .../sql/src/planner/binder/copy_into_table.rs | 8 +- .../sql/src/planner/binder/ddl/catalog.rs | 2 +- src/query/sql/src/planner/binder/ddl/stage.rs | 5 +- src/query/sql/src/planner/binder/ddl/table.rs | 38 +++++++-- src/query/sql/src/planner/binder/location.rs | 35 +++++++- src/query/sql/src/planner/binder/mod.rs | 1 + src/query/sql/src/planner/binder/presign.rs | 3 +- src/query/sql/src/planner/binder/table.rs | 2 +- .../common/table_meta/src/table/table_keys.rs | 3 + src/query/storages/factory/Cargo.toml | 1 + .../storages/factory/src/storage_factory.rs | 7 ++ src/query/storages/iceberg/src/catalog.rs | 17 ++-- src/query/storages/iceberg/src/database.rs | 2 +- src/query/storages/iceberg/src/lib.rs | 1 + src/query/storages/iceberg/src/table.rs | 83 ++++++++++++------- .../01_system/01_0004_system_engines.test | 2 +- .../base/06_show/06_0006_show_engines.test | 1 + .../query/case_sensitivity/system_table.test | 1 + .../10_iceberg/10_0002_iceberg_engine.result | 29 +++++++ .../10_iceberg/10_0002_iceberg_engine.sh | 26 ++++++ 33 files changed, 258 insertions(+), 80 deletions(-) create mode 100755 tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.result create mode 100755 
tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.sh diff --git a/Cargo.lock b/Cargo.lock index ab60f2a1db94..258a52d8e7e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2691,6 +2691,7 @@ dependencies = [ "common-profile", "common-settings", "common-storage", + "common-storages-iceberg", "common-storages-parquet", "common-storages-result-cache", "common-storages-stage", @@ -2764,6 +2765,7 @@ dependencies = [ "common-exception", "common-meta-app", "common-storages-fuse", + "common-storages-iceberg", "common-storages-memory", "common-storages-null", "common-storages-random", diff --git a/src/query/ast/src/ast/statements/table.rs b/src/query/ast/src/ast/statements/table.rs index 518223b4d62e..7115d399c598 100644 --- a/src/query/ast/src/ast/statements/table.rs +++ b/src/query/ast/src/ast/statements/table.rs @@ -619,6 +619,7 @@ pub enum Engine { Fuse, View, Random, + Iceberg, } impl Display for Engine { @@ -629,6 +630,7 @@ impl Display for Engine { Engine::Fuse => write!(f, "FUSE"), Engine::View => write!(f, "VIEW"), Engine::Random => write!(f, "RANDOM"), + Engine::Iceberg => write!(f, "ICEBERG"), } } } diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index e980c82f3a85..c58c22cf2e7f 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -2936,6 +2936,7 @@ pub fn engine(i: Input) -> IResult { value(Engine::Fuse, rule! { FUSE }), value(Engine::View, rule! { VIEW }), value(Engine::Random, rule! { RANDOM }), + value(Engine::Iceberg, rule! { ICEBERG }), )); map( diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index 57b9030c0038..7685f8dc346d 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -58,8 +58,10 @@ use storages_common_table_meta::meta::Versioned; use storages_common_table_meta::table::OPT_KEY_BLOOM_INDEX_COLUMNS; use storages_common_table_meta::table::OPT_KEY_CHANGE_TRACKING; use storages_common_table_meta::table::OPT_KEY_COMMENT; +use storages_common_table_meta::table::OPT_KEY_CONNECTION_NAME; use storages_common_table_meta::table::OPT_KEY_DATABASE_ID; use storages_common_table_meta::table::OPT_KEY_ENGINE; +use storages_common_table_meta::table::OPT_KEY_LOCATION; use storages_common_table_meta::table::OPT_KEY_SNAPSHOT_LOCATION; use storages_common_table_meta::table::OPT_KEY_STORAGE_FORMAT; use storages_common_table_meta::table::OPT_KEY_STORAGE_PREFIX; @@ -436,6 +438,11 @@ pub static CREATE_TABLE_OPTIONS: Lazy> = Lazy::new(|| { r.insert(OPT_KEY_ENGINE); + r.insert(OPT_KEY_ENGINE); + + r.insert(OPT_KEY_LOCATION); + r.insert(OPT_KEY_CONNECTION_NAME); + r.insert("transient"); r }); diff --git a/src/query/service/src/interpreters/interpreter_table_show_create.rs b/src/query/service/src/interpreters/interpreter_table_show_create.rs index eebe43242539..7759705651c0 100644 --- a/src/query/service/src/interpreters/interpreter_table_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_show_create.rs @@ -158,7 +158,7 @@ impl ShowCreateTableInterpreter { .get_hide_options_in_show_create_table() .unwrap_or(false); - if !hide_options_in_show_create_table { + if !hide_options_in_show_create_table || engine == "ICEBERG" { table_create_sql.push_str({ let mut opts = table_info.options().iter().collect::>(); opts.sort_by_key(|(k, _)| *k); diff --git a/src/query/service/src/sessions/query_ctx.rs 
b/src/query/service/src/sessions/query_ctx.rs index a9f883f96a92..8b78581f9db5 100644 --- a/src/query/service/src/sessions/query_ctx.rs +++ b/src/query/service/src/sessions/query_ctx.rs @@ -74,6 +74,7 @@ use common_storage::MergeStatus; use common_storage::StageFileInfo; use common_storage::StorageMetrics; use common_storages_fuse::TableContext; +use common_storages_iceberg::IcebergTable; use common_storages_parquet::Parquet2Table; use common_storages_parquet::ParquetRSTable; use common_storages_result_cache::ResultScan; @@ -97,6 +98,7 @@ use crate::sessions::QueryContextShared; use crate::sessions::Session; use crate::sessions::SessionManager; use crate::sessions::SessionType; +use crate::sql::binder::get_storage_params_from_options; use crate::storages::Table; const MYSQL_VERSION: &str = "8.0.26"; @@ -684,9 +686,7 @@ impl TableContext for QueryContext { } } async fn get_connection(&self, name: &str) -> Result { - let user_mgr = UserApiProvider::instance(); - let tenant = self.get_tenant(); - user_mgr.get_connection(&tenant, name).await + self.shared.get_connection(name).await } /// Fetch a Table by db and table name. @@ -703,7 +703,18 @@ impl TableContext for QueryContext { database: &str, table: &str, ) -> Result> { - self.shared.get_table(catalog, database, table).await + let table = self.shared.get_table(catalog, database, table).await?; + // the better place to do this is in the QueryContextShared::get_table_to_cache() method, + // but there is no way to access dyn TableContext. + let table: Arc = if table.engine() == "ICEBERG" { + let sp = get_storage_params_from_options(self, table.options()).await?; + let mut info = table.get_table_info().to_owned(); + info.meta.storage_params = Some(sp); + IcebergTable::try_create(info.to_owned())?.into() + } else { + table + }; + Ok(table) } #[async_backtrace::framed] diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index 6fa346e439d8..11b42a58b89d 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -30,6 +30,7 @@ use common_exception::ErrorCode; use common_exception::Result; use common_meta_app::principal::OnErrorMode; use common_meta_app::principal::RoleInfo; +use common_meta_app::principal::UserDefinedConnection; use common_meta_app::principal::UserInfo; use common_pipeline_core::InputError; use common_settings::Settings; @@ -37,6 +38,7 @@ use common_storage::CopyStatus; use common_storage::DataOperator; use common_storage::MergeStatus; use common_storage::StorageMetrics; +use common_users::UserApiProvider; use dashmap::DashMap; use parking_lot::Mutex; use parking_lot::RwLock; @@ -401,6 +403,12 @@ impl QueryContextShared { let status = self.status.read(); status.clone() } + + pub async fn get_connection(&self, name: &str) -> Result { + let user_mgr = UserApiProvider::instance(); + let tenant = self.get_tenant(); + user_mgr.get_connection(&tenant, name).await + } } impl Drop for QueryContextShared { diff --git a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs index f9d8c70ccdb6..ac4c3262815c 100644 --- a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs +++ b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs @@ -210,7 +210,7 @@ impl AsyncSource for InferSchemaSource { )?; FileLocation::Uri(uri) }; - let (stage_info, path) = resolve_file_location(&self.ctx, 
&file_location).await?; + let (stage_info, path) = resolve_file_location(self.ctx.as_ref(), &file_location).await?; let enable_experimental_rbac_check = self .ctx .get_settings() diff --git a/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs b/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs index 5eb4222ce82b..634617967d82 100644 --- a/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs +++ b/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs @@ -209,7 +209,7 @@ impl AsyncSource for InspectParquetSource { } self.is_finished = true; let uri = self.uri.strip_prefix('@').unwrap().to_string(); - let (stage_info, path) = resolve_stage_location(&self.ctx, &uri).await?; + let (stage_info, path) = resolve_stage_location(self.ctx.as_ref(), &uri).await?; let enable_experimental_rbac_check = self .ctx .get_settings() diff --git a/src/query/service/src/table_functions/list_stage/list_stage_table.rs b/src/query/service/src/table_functions/list_stage/list_stage_table.rs index 0932f15268b4..9fd9777a7a2d 100644 --- a/src/query/service/src/table_functions/list_stage/list_stage_table.rs +++ b/src/query/service/src/table_functions/list_stage/list_stage_table.rs @@ -184,7 +184,7 @@ impl AsyncSource for ListStagesSource { self.is_finished = true; let (stage_info, path) = - resolve_stage_location(&self.ctx, &self.args_parsed.location).await?; + resolve_stage_location(self.ctx.as_ref(), &self.args_parsed.location).await?; let enable_experimental_rbac_check = self .ctx .get_settings() diff --git a/src/query/service/tests/it/storages/testdata/engines_table.txt b/src/query/service/tests/it/storages/testdata/engines_table.txt index d34ffe14de1d..2832883fa293 100644 --- a/src/query/service/tests/it/storages/testdata/engines_table.txt +++ b/src/query/service/tests/it/storages/testdata/engines_table.txt @@ -1,15 +1,16 @@ ---------- TABLE INFO ------------ DB.Table: 'system'.'engines', Table: engines-table_id:1, ver:0, Engine: SystemEngines -------- TABLE CONTENTS ---------- -+----------+-------------------------------+ -| Column 0 | Column 1 | -+----------+-------------------------------+ -| 'FUSE' | 'FUSE Storage Engine' | -| 'MEMORY' | 'MEMORY Storage Engine' | -| 'NULL' | 'NULL Storage Engine' | -| 'RANDOM' | 'RANDOM Storage Engine' | -| 'STREAM' | 'STREAM STORAGE Engine' | -| 'VIEW' | 'VIEW STORAGE (LOGICAL VIEW)' | -+----------+-------------------------------+ ++-----------+-------------------------------+ +| Column 0 | Column 1 | ++-----------+-------------------------------+ +| 'FUSE' | 'FUSE Storage Engine' | +| 'ICEBERG' | 'ICEBERG STORAGE Engine' | +| 'MEMORY' | 'MEMORY Storage Engine' | +| 'NULL' | 'NULL Storage Engine' | +| 'RANDOM' | 'RANDOM Storage Engine' | +| 'STREAM' | 'STREAM STORAGE Engine' | +| 'VIEW' | 'VIEW STORAGE (LOGICAL VIEW)' | ++-----------+-------------------------------+ diff --git a/src/query/sql/Cargo.toml b/src/query/sql/Cargo.toml index 9cf5a14e2a67..58a266508b6b 100644 --- a/src/query/sql/Cargo.toml +++ b/src/query/sql/Cargo.toml @@ -37,6 +37,7 @@ common-pipeline-transforms = { path = "../pipeline/transforms" } common-profile = { path = "../profile" } common-settings = { path = "../settings" } common-storage = { path = "../../common/storage" } +common-storages-iceberg = { path = "../storages/iceberg" } common-storages-parquet = { path = "../storages/parquet" } common-storages-result-cache = { path = "../storages/result_cache" } common-storages-stage = { path = 
"../storages/stage" } diff --git a/src/query/sql/src/planner/binder/copy_into_location.rs b/src/query/sql/src/planner/binder/copy_into_location.rs index 9f0f98ee158d..3a77dc715fa4 100644 --- a/src/query/sql/src/planner/binder/copy_into_location.rs +++ b/src/query/sql/src/planner/binder/copy_into_location.rs @@ -73,7 +73,7 @@ impl<'a> Binder { } }?; - let (mut stage_info, path) = resolve_file_location(&self.ctx, &stmt.dst).await?; + let (mut stage_info, path) = resolve_file_location(self.ctx.as_ref(), &stmt.dst).await?; self.apply_copy_into_location_options(stmt, &mut stage_info) .await?; diff --git a/src/query/sql/src/planner/binder/copy_into_table.rs b/src/query/sql/src/planner/binder/copy_into_table.rs index ff9731af4836..77dff6e32eb5 100644 --- a/src/query/sql/src/planner/binder/copy_into_table.rs +++ b/src/query/sql/src/planner/binder/copy_into_table.rs @@ -119,7 +119,7 @@ impl<'a> Binder { let validation_mode = ValidationMode::from_str(stmt.validation_mode.as_str()) .map_err(ErrorCode::SyntaxException)?; - let (mut stage_info, path) = resolve_file_location(&self.ctx, location).await?; + let (mut stage_info, path) = resolve_file_location(self.ctx.as_ref(), location).await?; self.apply_copy_into_table_options(stmt, &mut stage_info) .await?; let files_info = StageFilesInfo { @@ -218,7 +218,7 @@ impl<'a> Binder { attachment: StageAttachment, ) -> Result<(StageInfo, StageFilesInfo)> { let (mut stage_info, path) = - resolve_stage_location(&self.ctx, &attachment.location[1..]).await?; + resolve_stage_location(self.ctx.as_ref(), &attachment.location[1..]).await?; if let Some(ref options) = attachment.file_format_options { stage_info.file_format_params = FileFormatOptionsAst { @@ -527,7 +527,7 @@ fn check_transform_query( /// - @internal/abc => (internal, "/stage/internal/abc") #[async_backtrace::framed] pub async fn resolve_stage_location( - ctx: &Arc, + ctx: &dyn TableContext, location: &str, ) -> Result<(StageInfo, String)> { // my_named_stage/abc/ @@ -550,7 +550,7 @@ pub async fn resolve_stage_location( #[async_backtrace::framed] pub async fn resolve_file_location( - ctx: &Arc, + ctx: &dyn TableContext, location: &FileLocation, ) -> Result<(StageInfo, String)> { match location.clone() { diff --git a/src/query/sql/src/planner/binder/ddl/catalog.rs b/src/query/sql/src/planner/binder/ddl/catalog.rs index 042e699f93c8..c0a0423503c9 100644 --- a/src/query/sql/src/planner/binder/ddl/catalog.rs +++ b/src/query/sql/src/planner/binder/ddl/catalog.rs @@ -198,7 +198,7 @@ async fn parse_catalog_url( }; let mut location = UriLocation::from_uri(uri, "".to_string(), options)?; - let (sp, _) = parse_uri_location(&mut location, Some(ctx)).await?; + let (sp, _) = parse_uri_location(&mut location, Some(ctx.as_ref())).await?; Ok(Some(sp)) } diff --git a/src/query/sql/src/planner/binder/ddl/stage.rs b/src/query/sql/src/planner/binder/ddl/stage.rs index 0217a9e8bbe4..b4d52923a0ad 100644 --- a/src/query/sql/src/planner/binder/ddl/stage.rs +++ b/src/query/sql/src/planner/binder/ddl/stage.rs @@ -39,7 +39,7 @@ impl Binder { location: &str, pattern: &str, ) -> Result { - let (stage, path) = resolve_stage_location(&self.ctx, location).await?; + let (stage, path) = resolve_stage_location(self.ctx.as_ref(), location).await?; let plan_node = RemoveStagePlan { path, stage, @@ -82,7 +82,8 @@ impl Binder { connection: uri.connection.clone(), }; - let (stage_storage, path) = parse_uri_location(&mut uri, Some(&self.ctx)).await?; + let (stage_storage, path) = + parse_uri_location(&mut uri, Some(self.ctx.as_ref())).await?; if 
!path.ends_with('/') { return Err(ErrorCode::SyntaxException( diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index 428626ec5610..e923f209bec8 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -69,6 +69,7 @@ use common_expression::TableSchemaRefExt; use common_functions::BUILTIN_FUNCTIONS; use common_meta_app::storage::StorageParams; use common_storage::DataOperator; +use common_storages_iceberg::IcebergTable; use common_storages_view::view_table::QUERY; use common_storages_view::view_table::VIEW_ENGINE; use log::debug; @@ -80,7 +81,8 @@ use storages_common_table_meta::table::OPT_KEY_STORAGE_PREFIX; use storages_common_table_meta::table::OPT_KEY_TABLE_ATTACHED_DATA_URI; use storages_common_table_meta::table::OPT_KEY_TABLE_COMPRESSION; -use crate::binder::location::parse_uri_location; +use crate::binder::get_storage_params_from_options; +use crate::binder::parse_uri_location; use crate::binder::scalar::ScalarBinder; use crate::binder::Binder; use crate::binder::ColumnBindingBuilder; @@ -418,8 +420,8 @@ impl Binder { )?; } - let (storage_params, part_prefix) = match uri_location { - Some(uri) => { + let (mut storage_params, part_prefix) = match (uri_location, engine) { + (Some(uri), Engine::Fuse) => { let mut uri = UriLocation { protocol: uri.protocol.clone(), name: uri.name.clone(), @@ -427,7 +429,7 @@ impl Binder { part_prefix: uri.part_prefix.clone(), connection: uri.connection.clone(), }; - let (sp, _) = parse_uri_location(&mut uri, Some(&self.ctx)).await?; + let (sp, _) = parse_uri_location(&mut uri, Some(self.ctx.as_ref())).await?; // create a temporary op to check if params is correct DataOperator::try_create(&sp).await?; @@ -441,7 +443,11 @@ impl Binder { (Some(sp), fp) } - None => (None, "".to_string()), + (Some(uri), _) => Err(ErrorCode::BadArguments(format!( + "Incorrect CREATE query: CREATE TABLE with external location is only supported for FUSE engine, but got {:?} for {:?}", + engine, uri + )))?, + _ => (None, "".to_string()), }; // If table is TRANSIENT, set a flag in table option @@ -495,9 +501,23 @@ impl Binder { Self::validate_create_table_schema(&source_schema)?; (source_schema, source_comments) } - _ => Err(ErrorCode::BadArguments( - "Incorrect CREATE query: required list of column descriptions or AS section or SELECT..", - ))?, + _ => { + if engine == Engine::Iceberg { + let sp = get_storage_params_from_options(self.ctx.as_ref(), &options).await?; + let dop = DataOperator::try_new(&sp)?; + let table = IcebergTable::load_iceberg_table(dop).await?; + let table_schema = IcebergTable::get_schema(&table).await?; + // the first version of current iceberg table do not need to persist the storage_params, + // since we get it from table options location and connection when load table each time. + // we do this in case we change this idea. + storage_params = Some(sp); + (Arc::new(table_schema), vec![]) + } else { + Err(ErrorCode::BadArguments( + "Incorrect CREATE query: required list of column descriptions or AS section or SELECT or ICEBERG table engine", + ))? 
+ } + } }; // for fuse engine, we will insert database_id, so if we check it in execute phase, @@ -646,7 +666,7 @@ impl Binder { let mut uri = stmt.uri_location.clone(); uri.path = root; - let (sp, _) = parse_uri_location(&mut uri, Some(&self.ctx)).await?; + let (sp, _) = parse_uri_location(&mut uri, Some(self.ctx.as_ref())).await?; // create a temporary op to check if params is correct DataOperator::try_create(&sp).await?; diff --git a/src/query/sql/src/planner/binder/location.rs b/src/query/sql/src/planner/binder/location.rs index 3eac662ca594..b958281ba715 100644 --- a/src/query/sql/src/planner/binder/location.rs +++ b/src/query/sql/src/planner/binder/location.rs @@ -12,16 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::collections::BTreeMap; use std::io::Error; use std::io::ErrorKind; use std::io::Result; -use std::sync::Arc; use anyhow::anyhow; use common_ast::ast::Connection; use common_ast::ast::UriLocation; use common_catalog::table_context::TableContext; use common_config::GlobalConfig; +use common_exception::ErrorCode; use common_meta_app::storage::StorageAzblobConfig; use common_meta_app::storage::StorageFsConfig; use common_meta_app::storage::StorageGcsConfig; @@ -386,7 +387,7 @@ fn parse_webhdfs_params(l: &mut UriLocation) -> Result { /// parse_uri_location will parse given UriLocation into StorageParams and Path. pub async fn parse_uri_location( l: &mut UriLocation, - ctx: Option<&Arc>, + ctx: Option<&dyn TableContext>, ) -> Result<(StorageParams, String)> { // Path endswith `/` means it's a directory, otherwise it's a file. // If the path is a directory, we will use this path as root. @@ -498,3 +499,33 @@ pub async fn parse_uri_location( Ok((sp, path)) } + +pub async fn get_storage_params_from_options( + ctx: &dyn TableContext, + options: &BTreeMap, +) -> common_exception::Result { + let location = options + .get("location") + .ok_or_else(|| ErrorCode::BadArguments("missing option 'location'".to_string()))?; + let connection = options.get("connection_name"); + + let mut location = if let Some(connection) = connection { + let connection = ctx.get_connection(connection).await?; + let location = UriLocation::from_uri( + location.to_string(), + "".to_string(), + connection.storage_params, + )?; + if location.protocol.to_lowercase() != connection.storage_type { + return Err(ErrorCode::BadArguments(format!( + "Incorrect CREATE query: protocol in location {:?} is not equal to connection {:?}", + location.protocol, connection.storage_type + ))); + }; + location + } else { + UriLocation::from_uri(location.to_string(), "".to_string(), BTreeMap::new())? 
+ }; + let (sp, _) = parse_uri_location(&mut location, None).await?; + Ok(sp) +} diff --git a/src/query/sql/src/planner/binder/mod.rs b/src/query/sql/src/planner/binder/mod.rs index 86ae9ff07961..4058e57945c7 100644 --- a/src/query/sql/src/planner/binder/mod.rs +++ b/src/query/sql/src/planner/binder/mod.rs @@ -62,6 +62,7 @@ pub use column_binding::ColumnBindingBuilder; pub use copy_into_table::resolve_file_location; pub use copy_into_table::resolve_stage_location; pub use internal_column_factory::INTERNAL_COLUMN_FACTORY; +pub use location::get_storage_params_from_options; pub use location::parse_uri_location; pub use merge_into::MergeIntoType; pub use scalar::ScalarBinder; diff --git a/src/query/sql/src/planner/binder/presign.rs b/src/query/sql/src/planner/binder/presign.rs index 2b93bf2c4304..bb7771ab6f93 100644 --- a/src/query/sql/src/planner/binder/presign.rs +++ b/src/query/sql/src/planner/binder/presign.rs @@ -33,7 +33,8 @@ impl Binder { ) -> Result { match &stmt.location { PresignLocation::StageLocation(stage_location) => { - let (stage_info, path) = resolve_stage_location(&self.ctx, stage_location).await?; + let (stage_info, path) = + resolve_stage_location(self.ctx.as_ref(), stage_location).await?; Ok(Plan::Presign(Box::new(PresignPlan { stage: Box::new(stage_info), diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index dc7d6e8a6dfa..c0ef8ee4c877 100644 --- a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -737,7 +737,7 @@ impl Binder { }), _ => location.clone(), }; - let (mut stage_info, path) = resolve_file_location(&self.ctx, &location).await?; + let (mut stage_info, path) = resolve_file_location(self.ctx.as_ref(), &location).await?; if let Some(f) = &options.file_format { stage_info.file_format_params = match StageFileFormatType::from_str(f) { Ok(t) => FileFormatParams::default_by_type(t)?, diff --git a/src/query/storages/common/table_meta/src/table/table_keys.rs b/src/query/storages/common/table_meta/src/table/table_keys.rs index 1b2fa41ed375..391f0f3f4bb1 100644 --- a/src/query/storages/common/table_meta/src/table/table_keys.rs +++ b/src/query/storages/common/table_meta/src/table/table_keys.rs @@ -30,6 +30,9 @@ pub const OPT_KEY_TABLE_ATTACHED_DATA_URI: &str = "table_data_uri"; // Read only attached table options. 
pub const OPT_KEY_TABLE_ATTACHED_READ_ONLY: &str = "read_only_attached"; +pub const OPT_KEY_LOCATION: &str = "location"; +pub const OPT_KEY_CONNECTION_NAME: &str = "connection_name"; + /// Legacy table snapshot location key /// /// # Deprecated diff --git a/src/query/storages/factory/Cargo.toml b/src/query/storages/factory/Cargo.toml index 4dbdb6146841..f0149ce63a9a 100644 --- a/src/query/storages/factory/Cargo.toml +++ b/src/query/storages/factory/Cargo.toml @@ -17,6 +17,7 @@ common-config = { path = "../../config" } common-exception = { path = "../../../common/exception" } common-meta-app = { path = "../../../meta/app" } common-storages-fuse = { path = "../fuse" } +common-storages-iceberg = { path = "../iceberg" } common-storages-memory = { path = "../memory" } common-storages-null = { path = "../null" } common-storages-random = { path = "../random" } diff --git a/src/query/storages/factory/src/storage_factory.rs b/src/query/storages/factory/src/storage_factory.rs index d95dccb5169b..6cdb2199d09c 100644 --- a/src/query/storages/factory/src/storage_factory.rs +++ b/src/query/storages/factory/src/storage_factory.rs @@ -19,6 +19,7 @@ use common_config::InnerConfig; use common_exception::ErrorCode; use common_exception::Result; use common_meta_app::schema::TableInfo; +use common_storages_iceberg::IcebergTable; use common_storages_memory::MemoryTable; use common_storages_null::NullTable; use common_storages_random::RandomTable; @@ -109,6 +110,12 @@ impl StorageFactory { descriptor: Arc::new(StreamTable::description), }); + // Register ICEBERG table engine + creators.insert("ICEBERG".to_string(), Storage { + creator: Arc::new(IcebergTable::try_create), + descriptor: Arc::new(IcebergTable::description), + }); + StorageFactory { storages: creators } } diff --git a/src/query/storages/iceberg/src/catalog.rs b/src/query/storages/iceberg/src/catalog.rs index 8477b4fc42cd..606dcb27d37a 100644 --- a/src/query/storages/iceberg/src/catalog.rs +++ b/src/query/storages/iceberg/src/catalog.rs @@ -89,7 +89,7 @@ use futures::TryStreamExt; use opendal::Metakey; use crate::database::IcebergDatabase; -use crate::table::IcebergTable; +use crate::IcebergTable; pub const ICEBERG_CATALOG: &str = "iceberg"; @@ -234,18 +234,15 @@ impl Catalog for IcebergCatalog { } fn get_table_by_info(&self, table_info: &TableInfo) -> Result> { - let table_sp = table_info - .meta - .storage_params - .clone() - .ok_or(ErrorCode::BadArguments( + if table_info.meta.storage_params.is_none() { + return Err(ErrorCode::BadArguments( "table storage params not set, this is not a valid table info for iceberg table", - ))?; + )); + } - let op = DataOperator::try_new(&table_sp)?; - let table = IcebergTable::try_new(op, table_info.clone())?; + let table: Arc = IcebergTable::try_create(table_info.clone())?.into(); - Ok(Arc::new(table)) + Ok(table) } #[async_backtrace::framed] diff --git a/src/query/storages/iceberg/src/database.rs b/src/query/storages/iceberg/src/database.rs index a921b2c2d3d3..f5c1fc726730 100644 --- a/src/query/storages/iceberg/src/database.rs +++ b/src/query/storages/iceberg/src/database.rs @@ -91,7 +91,7 @@ impl Database for IcebergDatabase { let table_sp = table_sp.auto_detect().await?; let tbl_root = DataOperator::try_create(&table_sp).await?; - let tbl = IcebergTable::try_create( + let tbl = IcebergTable::try_create_from_iceberg_catalog( &self.ctl_name, &self.info.name_ident.db_name, table_name, diff --git a/src/query/storages/iceberg/src/lib.rs b/src/query/storages/iceberg/src/lib.rs index f67b88924fa1..31597b1f8b29 100644
--- a/src/query/storages/iceberg/src/lib.rs +++ b/src/query/storages/iceberg/src/lib.rs @@ -101,3 +101,4 @@ mod table_source; pub use catalog::IcebergCatalog; pub use catalog::IcebergCreator; pub use catalog::ICEBERG_CATALOG; +pub use table::IcebergTable; diff --git a/src/query/storages/iceberg/src/table.rs b/src/query/storages/iceberg/src/table.rs index 0aaf629c9a26..ea2465d9d278 100644 --- a/src/query/storages/iceberg/src/table.rs +++ b/src/query/storages/iceberg/src/table.rs @@ -20,6 +20,7 @@ use async_trait::async_trait; use chrono::Utc; use common_arrow::arrow::datatypes::Field as Arrow2Field; use common_arrow::arrow::datatypes::Schema as Arrow2Schema; +use common_catalog::catalog::StorageDescription; use common_catalog::plan::DataSourcePlan; use common_catalog::plan::ParquetReadOptions; use common_catalog::plan::PartInfo; @@ -38,7 +39,9 @@ use common_functions::BUILTIN_FUNCTIONS; use common_meta_app::schema::TableIdent; use common_meta_app::schema::TableInfo; use common_meta_app::schema::TableMeta; +use common_meta_app::storage::StorageParams; use common_pipeline_core::Pipeline; +use common_storage::init_operator; use common_storage::DataOperator; use common_storages_parquet::ParquetFilesPart; use common_storages_parquet::ParquetPart; @@ -53,35 +56,44 @@ use crate::partition::IcebergPartInfo; use crate::stats::get_stats_of_data_file; use crate::table_source::IcebergTableSource; +pub const ICEBERG_ENGINE: &str = "ICEBERG"; + /// accessor wrapper as a table /// /// TODO: we should use icelake Table instead. pub struct IcebergTable { info: TableInfo, - op: DataOperator, - table: OnceCell, } impl IcebergTable { /// create a new table on the table directory #[async_backtrace::framed] - pub fn try_new(dop: DataOperator, info: TableInfo) -> Result { - Ok(Self { + pub fn try_create(info: TableInfo) -> Result> { + Ok(Box::new(Self { info, - op: dop, table: OnceCell::new(), + })) + } + + pub fn description() -> StorageDescription { + StorageDescription { + engine_name: ICEBERG_ENGINE.to_string(), + comment: "ICEBERG STORAGE Engine".to_string(), + support_cluster_key: false, + } + } + + fn get_storage_params(&self) -> Result<&StorageParams> { + self.info.meta.storage_params.as_ref().ok_or_else(|| { + ErrorCode::BadArguments(format!( + "Iceberg table {} must have storage parameters", + self.info.name + )) }) } - /// create a new table on the table directory - #[async_backtrace::framed] - pub async fn try_create( - catalog: &str, - database: &str, - table_name: &str, - dop: DataOperator, - ) -> Result { + pub async fn load_iceberg_table(dop: DataOperator) -> Result { // FIXME: we should implement catalog for icelake. let icelake_catalog = Arc::new(icelake::catalog::StorageCatalog::new( "databend", @@ -89,10 +101,12 @@ impl IcebergTable { )); let table_id = icelake::TableIdentifier::new(vec![""]).unwrap(); - let table = icelake_catalog.load_table(&table_id).await.map_err(|err| { + icelake_catalog.load_table(&table_id).await.map_err(|err| { ErrorCode::ReadTableDataError(format!("Iceberg catalog load failed: {err:?}")) - })?; + }) + } + pub async fn get_schema(table: &icelake::Table) -> Result { let meta = table.current_table_metadata(); // Build arrow schema from iceberg metadata. 
@@ -116,7 +130,19 @@ impl IcebergTable { .collect(); let arrow2_schema = Arrow2Schema::from(fields); - let table_schema = TableSchema::from(&arrow2_schema); + Ok(TableSchema::from(&arrow2_schema)) + } + + /// create a new table on the table directory + #[async_backtrace::framed] + pub async fn try_create_from_iceberg_catalog( + catalog: &str, + database: &str, + table_name: &str, + dop: DataOperator, + ) -> Result { + let table = Self::load_iceberg_table(dop.clone()).await?; + let table_schema = Self::get_schema(&table).await?; // construct table info let info = TableInfo { @@ -136,7 +162,6 @@ impl IcebergTable { Ok(Self { info, - op: dop, table: OnceCell::new_with(Some(table)), }) } @@ -144,10 +169,12 @@ impl IcebergTable { async fn table(&self) -> Result<&icelake::Table> { self.table .get_or_try_init(|| async { + let sp = self.get_storage_params()?; + let op = DataOperator::try_new(sp)?; // FIXME: we should implement catalog for icelake. let icelake_catalog = Arc::new(icelake::catalog::StorageCatalog::new( "databend", - OperatorCreatorWrapper(self.op.clone()), + OperatorCreatorWrapper(op), )); let table_id = icelake::TableIdentifier::new(vec![""]).unwrap(); @@ -202,17 +229,15 @@ impl IcebergTable { read_options, )?; - let mut builder = ParquetRSReaderBuilder::create( - ctx.clone(), - self.op.operator(), - table_schema, - &arrow_schema, - )? - .with_options(read_options) - .with_push_downs(plan.push_downs.as_ref()) - .with_pruner(Some(pruner)); + let sp = self.get_storage_params()?; + let op = init_operator(sp)?; + let mut builder = + ParquetRSReaderBuilder::create(ctx.clone(), op, table_schema, &arrow_schema)? + .with_options(read_options) + .with_push_downs(plan.push_downs.as_ref()) + .with_pruner(Some(pruner)); - let praquet_reader = Arc::new(builder.build_full_reader()?); + let parquet_reader = Arc::new(builder.build_full_reader()?); // TODO: we need to support top_k. 
let output_schema = Arc::new(DataSchema::from(plan.schema())); @@ -222,7 +247,7 @@ impl IcebergTable { ctx.clone(), output, output_schema.clone(), - praquet_reader.clone(), + parquet_reader.clone(), ) }, max_threads.max(1), diff --git a/tests/sqllogictests/suites/base/01_system/01_0004_system_engines.test b/tests/sqllogictests/suites/base/01_system/01_0004_system_engines.test index c164a76b23bc..b3cade654880 100644 --- a/tests/sqllogictests/suites/base/01_system/01_0004_system_engines.test +++ b/tests/sqllogictests/suites/base/01_system/01_0004_system_engines.test @@ -2,5 +2,5 @@ onlyif mysql query TT SELECT * FROM system.engines ORDER BY "Engine" LIMIT 1,2 ---- +ICEBERG ICEBERG STORAGE Engine MEMORY MEMORY Storage Engine -NULL NULL Storage Engine diff --git a/tests/sqllogictests/suites/base/06_show/06_0006_show_engines.test b/tests/sqllogictests/suites/base/06_show/06_0006_show_engines.test index 27b1311a2a6e..d7fccf6eeba8 100644 --- a/tests/sqllogictests/suites/base/06_show/06_0006_show_engines.test +++ b/tests/sqllogictests/suites/base/06_show/06_0006_show_engines.test @@ -2,6 +2,7 @@ query TT SHOW ENGINES ---- FUSE FUSE Storage Engine +ICEBERG ICEBERG STORAGE Engine MEMORY MEMORY Storage Engine NULL NULL Storage Engine RANDOM RANDOM Storage Engine diff --git a/tests/sqllogictests/suites/query/case_sensitivity/system_table.test b/tests/sqllogictests/suites/query/case_sensitivity/system_table.test index f9f2c171cea2..90792c9742bd 100644 --- a/tests/sqllogictests/suites/query/case_sensitivity/system_table.test +++ b/tests/sqllogictests/suites/query/case_sensitivity/system_table.test @@ -3,6 +3,7 @@ query I select "Engine" as engine from system.engines order by engine ---- FUSE +ICEBERG MEMORY NULL RANDOM diff --git a/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.result b/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.result new file mode 100755 index 000000000000..e0a2dd380b93 --- /dev/null +++ b/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.result @@ -0,0 +1,29 @@ +>>>> drop table if exists test_iceberg; +>>>> create table test_iceberg engine = iceberg location = 'fs://${ROOT}/'; +>>>> select * from test_iceberg order by id, data; +1 a +2 b +3 c +4 d +5 e +6 d +<<<< +>>>> drop table test_iceberg; +>>>> drop connection if exists iceberg_conn; +>>>> create connection iceberg_conn storage_type = 's3' access_key_id ='minioadmin' secret_access_key ='minioadmin' ENDPOINT_URL='http://127.0.0.1:9900'; +>>>> create table test_iceberg engine = iceberg location = 's3://testbucket/iceberg_ctl/iceberg_db/iceberg_tbl/' connection_name = 'iceberg_conn' ; +>>>> select * from test_iceberg order by id, data; +1 a +2 b +3 c +4 d +5 e +6 d +<<<< +>>>> show create table test_iceberg; +test_iceberg CREATE TABLE `test_iceberg` ( + `id` INT NOT NULL, + `data` VARCHAR NOT NULL +) ENGINE=ICEBERG CONNECTION_NAME='iceberg_conn' LOCATION='s3://testbucket/iceberg_ctl/iceberg_db/iceberg_tbl/' +<<<< +>>>> drop table test_iceberg; diff --git a/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.sh b/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.sh new file mode 100755 index 000000000000..8568e86bb6c4 --- /dev/null +++ b/tests/suites/1_stateful/10_iceberg/10_0002_iceberg_engine.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
"$CURDIR"/../../../shell_env.sh + +ROOT=$(realpath "$CURDIR"/../../../data/iceberg/iceberg_ctl/iceberg_db/iceberg_tbl/) + +stmt "drop table if exists test_iceberg;" + +echo ">>>> create table test_iceberg engine = iceberg location = 'fs://\${ROOT}/';" +echo "create table test_iceberg engine = iceberg location = 'fs://${ROOT}/';" | $BENDSQL_CLIENT_CONNECT + +query "select * from test_iceberg order by id, data;" + +stmt "drop table test_iceberg;" + +stmt "drop connection if exists iceberg_conn;" +stmt "create connection iceberg_conn storage_type = 's3' access_key_id ='minioadmin' secret_access_key ='minioadmin' ENDPOINT_URL='http://127.0.0.1:9900';" + +echo ">>>> create table test_iceberg engine = iceberg location = 's3://testbucket/iceberg_ctl/iceberg_db/iceberg_tbl/' connection_name = 'iceberg_conn' ;" +echo "create table test_iceberg engine = iceberg location = 's3://testbucket/iceberg_ctl/iceberg_db/iceberg_tbl/' connection_name = 'iceberg_conn';" | $BENDSQL_CLIENT_CONNECT +query "select * from test_iceberg order by id, data;" +query "show create table test_iceberg;" +stmt "drop table test_iceberg;" + + From 4fc87e4b07b8669724a26267cea3935e10d92491 Mon Sep 17 00:00:00 2001 From: TCeason <33082201+TCeason@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:18:27 +0800 Subject: [PATCH 10/16] fix(query): add some dml prvilege check (#13845) --- src/meta/app/src/principal/user_stage.rs | 7 ++- .../src/stage_from_to_protobuf_impl.rs | 2 +- .../proto-conv/tests/it/user_proto_conv.rs | 22 +++---- .../proto-conv/tests/it/v025_user_stage.rs | 2 +- .../proto-conv/tests/it/v035_user_stage.rs | 2 +- .../tests/it/v042_s3_stage_new_field.rs | 2 +- .../interpreters/access/privilege_access.rs | 36 +++++++++-- .../infer_schema/infer_schema_table.rs | 2 +- .../inspect_parquet/inspect_parquet_table.rs | 2 +- .../list_stage/list_stage_table.rs | 2 +- src/query/storages/system/src/stages_table.rs | 2 +- .../18_rbac/20_0012_privilege_access.result | 12 +++- .../18_rbac/20_0012_privilege_access.sh | 61 +++++++++++++++++-- .../00_stage/00_0012_stage_priv.result | 1 + .../1_stateful/00_stage/00_0012_stage_priv.sh | 23 +++++++ 15 files changed, 145 insertions(+), 33 deletions(-) diff --git a/src/meta/app/src/principal/user_stage.rs b/src/meta/app/src/principal/user_stage.rs index 0e2345527074..2077fbfff960 100644 --- a/src/meta/app/src/principal/user_stage.rs +++ b/src/meta/app/src/principal/user_stage.rs @@ -561,7 +561,8 @@ pub struct StageInfo { pub stage_name: String, pub stage_type: StageType, pub stage_params: StageParams, - pub is_from_uri: bool, + // on `COPY INTO xx FROM 's3://xxx?ak=?&sk=?'`, the URL(ExternalLocation) will be treated as an temporary stage. 
+ pub is_temporary: bool, pub file_format_params: FileFormatParams, pub copy_options: CopyOptions, pub comment: String, @@ -580,11 +581,11 @@ impl StageInfo { } } - pub fn new_external_stage(storage: StorageParams, path: &str, from_uri: bool) -> StageInfo { + pub fn new_external_stage(storage: StorageParams, path: &str, is_temporary: bool) -> StageInfo { StageInfo { stage_name: format!("{storage},path={path}"), stage_type: StageType::External, - is_from_uri: from_uri, + is_temporary, stage_params: StageParams { storage }, ..Default::default() } diff --git a/src/meta/proto-conv/src/stage_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/stage_from_to_protobuf_impl.rs index a3a83e225d0c..40c5a14b3272 100644 --- a/src/meta/proto-conv/src/stage_from_to_protobuf_impl.rs +++ b/src/meta/proto-conv/src/stage_from_to_protobuf_impl.rs @@ -207,7 +207,7 @@ impl FromToProto for mt::principal::StageInfo { reason: "StageInfo.stage_params cannot be None".to_string(), }, )?)?, - is_from_uri: false, + is_temporary: false, file_format_params, copy_options: mt::principal::CopyOptions::from_pb(p.copy_options.ok_or_else( || Incompatible { diff --git a/src/meta/proto-conv/tests/it/user_proto_conv.rs b/src/meta/proto-conv/tests/it/user_proto_conv.rs index 1e310de1f96e..b8cc3327620b 100644 --- a/src/meta/proto-conv/tests/it/user_proto_conv.rs +++ b/src/meta/proto-conv/tests/it/user_proto_conv.rs @@ -79,7 +79,7 @@ pub(crate) fn test_fs_stage_info() -> mt::principal::StageInfo { root: "/dir/to/files".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -121,7 +121,7 @@ pub(crate) fn test_s3_stage_info() -> mt::principal::StageInfo { ..Default::default() }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -160,7 +160,7 @@ pub(crate) fn test_s3_stage_info_v16() -> mt::principal::StageInfo { ..Default::default() }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -199,7 +199,7 @@ pub(crate) fn test_s3_stage_info_v14() -> mt::principal::StageInfo { ..Default::default() }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -234,7 +234,7 @@ pub(crate) fn test_gcs_stage_info() -> mt::principal::StageInfo { credential: "my_credential".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -273,7 +273,7 @@ pub(crate) fn test_oss_stage_info() -> mt::principal::StageInfo { server_side_encryption_key_id: "".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -307,7 +307,7 @@ pub(crate) fn test_webhdfs_stage_info() -> mt::principal::StageInfo { delegation: "".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( 
mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -342,7 +342,7 @@ pub(crate) fn test_obs_stage_info() -> mt::principal::StageInfo { bucket: "bucket".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -377,7 +377,7 @@ pub(crate) fn test_cos_stage_info() -> mt::principal::StageInfo { bucket: "bucket".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -820,7 +820,7 @@ pub(crate) fn test_internal_stage_info_v17() -> mt::principal::StageInfo { root: "/dir/to/files".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, @@ -851,7 +851,7 @@ pub(crate) fn test_stage_info_v18() -> mt::principal::StageInfo { root: "/dir/to/files".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, diff --git a/src/meta/proto-conv/tests/it/v025_user_stage.rs b/src/meta/proto-conv/tests/it/v025_user_stage.rs index fc65bfc2647e..82add6d8f7e8 100644 --- a/src/meta/proto-conv/tests/it/v025_user_stage.rs +++ b/src/meta/proto-conv/tests/it/v025_user_stage.rs @@ -50,7 +50,7 @@ fn test_decode_v25_user_stage() -> anyhow::Result<()> { root: "/dir/to/files".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, diff --git a/src/meta/proto-conv/tests/it/v035_user_stage.rs b/src/meta/proto-conv/tests/it/v035_user_stage.rs index d774201a5543..e4478e16d86c 100644 --- a/src/meta/proto-conv/tests/it/v035_user_stage.rs +++ b/src/meta/proto-conv/tests/it/v035_user_stage.rs @@ -47,7 +47,7 @@ fn test_decode_v35_user_stage() -> anyhow::Result<()> { root: "/dir/to/files".to_string(), }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, diff --git a/src/meta/proto-conv/tests/it/v042_s3_stage_new_field.rs b/src/meta/proto-conv/tests/it/v042_s3_stage_new_field.rs index 1bef1628721a..f888a9928dfc 100644 --- a/src/meta/proto-conv/tests/it/v042_s3_stage_new_field.rs +++ b/src/meta/proto-conv/tests/it/v042_s3_stage_new_field.rs @@ -51,7 +51,7 @@ fn test_decode_v42_s3_stage_new_field() -> anyhow::Result<()> { ..Default::default() }), }, - is_from_uri: false, + is_temporary: false, file_format_params: mt::principal::FileFormatParams::Json( mt::principal::JsonFileFormatParams { compression: mt::principal::StageFileCompression::Bz2, diff --git a/src/query/service/src/interpreters/access/privilege_access.rs b/src/query/service/src/interpreters/access/privilege_access.rs index 43b38d90f239..756a39d24010 100644 --- a/src/query/service/src/interpreters/access/privilege_access.rs +++ b/src/query/service/src/interpreters/access/privilege_access.rs @@ -26,6 +26,7 @@ use common_meta_app::principal::StageType; use 
common_meta_app::principal::UserGrantSet; use common_meta_app::principal::UserPrivilegeType; use common_sql::optimizer::get_udf_names; +use common_sql::plans::InsertInputSource; use common_sql::plans::PresignAction; use common_sql::plans::RewriteKind; use common_users::RoleCacheManager; @@ -140,7 +141,7 @@ impl PrivilegeAccess { } // skip check the temp stage from uri like `COPY INTO tbl FROM 'http://xxx'` - if stage_info.is_from_uri { + if stage_info.is_temporary { return Ok(()); } @@ -400,13 +401,15 @@ impl AccessChecker for PrivilegeAccess { .await? } Plan::CreateTable(plan) => { - // TODO(TCeason): as_select need check privilege. self.validate_access( &GrantObject::Database(plan.catalog.clone(), plan.database.clone()), vec![UserPrivilegeType::Create], true, ) .await?; + if let Some(query) = &plan.as_select { + self.check(ctx, query).await?; + } } Plan::DropTable(plan) => { self.validate_access( @@ -610,7 +613,6 @@ impl AccessChecker for PrivilegeAccess { } // Others. Plan::Insert(plan) => { - //TODO(TCeason): source need to check privileges. self.validate_access( &GrantObject::Table( plan.catalog.clone(), @@ -621,9 +623,20 @@ impl AccessChecker for PrivilegeAccess { true, ) .await?; + match &plan.source { + InsertInputSource::SelectPlan(plan) => { + self.check(ctx, plan).await?; + } + InsertInputSource::Stage(plan) => { + self.check(ctx, plan).await?; + } + InsertInputSource::StreamingWithFormat(..) + | InsertInputSource::StreamingWithFileFormat {..} + | InsertInputSource::Values {..} => {} + } } Plan::Replace(plan) => { - //TODO(TCeason): source and delete_when need to check privileges. + //plan.delete_when is Expr no need to check privileges. self.validate_access( &GrantObject::Table( plan.catalog.clone(), @@ -634,6 +647,17 @@ impl AccessChecker for PrivilegeAccess { true, ) .await?; + match &plan.source { + InsertInputSource::SelectPlan(plan) => { + self.check(ctx, plan).await?; + } + InsertInputSource::Stage(plan) => { + self.check(ctx, plan).await?; + } + InsertInputSource::StreamingWithFormat(..) + | InsertInputSource::StreamingWithFileFormat {..} + | InsertInputSource::Values {..} => {} + } } Plan::MergeInto(plan) => { if enable_experimental_rbac_check { @@ -864,7 +888,6 @@ impl AccessChecker for PrivilegeAccess { .await?; } Plan::CopyIntoTable(plan) => { - // TODO(TCeason): need to check plan.query privileges. 
self.validate_access_stage(&plan.stage_table_info.stage_info, UserPrivilegeType::Read).await?; self .validate_access( @@ -877,6 +900,9 @@ impl AccessChecker for PrivilegeAccess { true, ) .await?; + if let Some(query) = &plan.query { + self.check(ctx, query).await?; + } } Plan::CopyIntoLocation(plan) => { self.validate_access_stage(&plan.stage, UserPrivilegeType::Write).await?; diff --git a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs index ac4c3262815c..2c502bb2d864 100644 --- a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs +++ b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs @@ -217,7 +217,7 @@ impl AsyncSource for InferSchemaSource { .get_enable_experimental_rbac_check()?; if enable_experimental_rbac_check { let visibility_checker = self.ctx.get_visibility_checker().await?; - if !stage_info.is_from_uri + if !stage_info.is_temporary && !visibility_checker.check_stage_read_visibility(&stage_info.stage_name) { return Err(ErrorCode::PermissionDenied(format!( diff --git a/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs b/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs index 634617967d82..57455e8a3b07 100644 --- a/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs +++ b/src/query/service/src/table_functions/inspect_parquet/inspect_parquet_table.rs @@ -216,7 +216,7 @@ impl AsyncSource for InspectParquetSource { .get_enable_experimental_rbac_check()?; if enable_experimental_rbac_check { let visibility_checker = self.ctx.get_visibility_checker().await?; - if !stage_info.is_from_uri + if !stage_info.is_temporary && !visibility_checker.check_stage_read_visibility(&stage_info.stage_name) { return Err(ErrorCode::PermissionDenied(format!( diff --git a/src/query/service/src/table_functions/list_stage/list_stage_table.rs b/src/query/service/src/table_functions/list_stage/list_stage_table.rs index 9fd9777a7a2d..b5d45ca90165 100644 --- a/src/query/service/src/table_functions/list_stage/list_stage_table.rs +++ b/src/query/service/src/table_functions/list_stage/list_stage_table.rs @@ -191,7 +191,7 @@ impl AsyncSource for ListStagesSource { .get_enable_experimental_rbac_check()?; if enable_experimental_rbac_check { let visibility_checker = self.ctx.get_visibility_checker().await?; - if !stage_info.is_from_uri + if !stage_info.is_temporary && !visibility_checker.check_stage_read_visibility(&stage_info.stage_name) { return Err(ErrorCode::PermissionDenied(format!( diff --git a/src/query/storages/system/src/stages_table.rs b/src/query/storages/system/src/stages_table.rs index 3089c04ab1ee..ae995b592a7e 100644 --- a/src/query/storages/system/src/stages_table.rs +++ b/src/query/storages/system/src/stages_table.rs @@ -63,7 +63,7 @@ impl AsyncSystemTable for StagesTable { stages .into_iter() .filter(|stage| { - !stage.is_from_uri + !stage.is_temporary && visibility_checker.check_stage_visibility(&stage.stage_name) }) .collect::>() diff --git a/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.result b/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.result index bfb72238eace..70a7148341b7 100644 --- a/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.result +++ b/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.result @@ -49,7 +49,15 @@ dummy TINYINT UNSIGNED NO NULL NULL c1 INT NO NULL NULL Error: APIError: ResponseError with 1063: 
Permission denied, user 'a'@'%' don't have privilege for table system.tables Error: APIError: ResponseError with 1063: Permission denied, user 'a'@'%' don't have privilege for database nogrant -2 +1 +0 93 8 -2 +1 +0 +Error: APIError: ResponseError with 1063: Permission denied, privilege [Select] is required on 'default'.'default'.'t1' for user 'b'@'%' with roles [public] +Error: APIError: ResponseError with 1063: Permission denied, privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public] +Error: APIError: ResponseError with 1063: Permission denied, privilege [Select] is required on 'default'.'default'.'t' for user 'b'@'%' with roles [public] +Error: APIError: ResponseError with 1063: Permission denied, privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public] +Error: APIError: ResponseError with 1063: Permission denied, privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public] +Error: APIError: ResponseError with 1063: Permission denied, privilege [Select] is required on 'default'.'default'.'t1' for user 'b'@'%' with roles [public] diff --git a/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.sh b/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.sh index d90b149f535b..a646c49a04e2 100755 --- a/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.sh +++ b/tests/suites/0_stateless/18_rbac/20_0012_privilege_access.sh @@ -167,16 +167,69 @@ echo "show columns from t from grant_db" | $USER_A_CONNECT echo "show columns from tables from system" | $USER_A_CONNECT echo "show tables from nogrant" | $USER_A_CONNECT - -# should return result: 2. default.test_t.id and grant_db.t.c1 -echo "select count(1) from information_schema.columns where table_schema not in ('information_schema', 'system');" | $USER_A_CONNECT +echo "select count(1) from information_schema.columns where table_schema in ('grant_db');" | $USER_A_CONNECT +echo "select count(1) from information_schema.columns where table_schema in ('nogrant');" | $USER_A_CONNECT echo "select count(1) from information_schema.columns where table_schema in ('information_schema', 'system');" | $USER_A_CONNECT echo "select count(1) from information_schema.tables where table_schema in ('information_schema', 'system');;" | $USER_A_CONNECT -echo "select count(1) from information_schema.tables where table_schema not in ('information_schema', 'system');" | $USER_A_CONNECT +echo "select count(1) from information_schema.tables where table_schema in ('grant_db');" | $USER_A_CONNECT +echo "select count(1) from information_schema.tables where table_schema in ('nogrant');" | $USER_A_CONNECT + +#DML privilege check +export USER_B_CONNECT="bendsql --user=b --password=password --host=${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT}" + +rm -rf /tmp/00_0020 +mkdir -p /tmp/00_0020 +cat << EOF > /tmp/00_0020/i0.csv +1 +2 +EOF + +echo "drop user if exists b" | $BENDSQL_CLIENT_CONNECT +echo "create user b identified by '$TEST_USER_PASSWORD'" | $BENDSQL_CLIENT_CONNECT + +echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t2" | $BENDSQL_CLIENT_CONNECT +echo "drop stage if exists s3;" | $BENDSQL_CLIENT_CONNECT + +echo "create table t(id int)" | $BENDSQL_CLIENT_CONNECT +echo "create table t1(id int)" | $BENDSQL_CLIENT_CONNECT +echo "grant create on default.* to b" | $BENDSQL_CLIENT_CONNECT +echo "grant insert, delete on default.t to b" | $BENDSQL_CLIENT_CONNECT +echo "grant select on system.* to b" 
| $BENDSQL_CLIENT_CONNECT + +echo "create stage s3;" | $BENDSQL_CLIENT_CONNECT +echo "copy into '@s3/a b' from (select 2);" | $BENDSQL_CLIENT_CONNECT + +# need err +echo "insert into t select * from t1" | $USER_B_CONNECT +echo "insert into t select * from @s3" | $USER_B_CONNECT +echo "create table t2 as select * from t" | $USER_B_CONNECT +echo "create table t2 as select * from @s3" | $USER_B_CONNECT +echo "copy into t from (select * from @s3);" | $USER_B_CONNECT +echo "replace into t on(id) select * from t1;" | $USER_B_CONNECT + +echo "grant select on default.t to b" | $BENDSQL_CLIENT_CONNECT +echo "grant select on default.t1 to b" | $BENDSQL_CLIENT_CONNECT +echo "grant read on stage s3 to b" | $BENDSQL_CLIENT_CONNECT + +echo "insert into t select * from t1" | $USER_B_CONNECT +echo "insert into t select * from @s3" | $USER_B_CONNECT +echo "create table t2 as select * from t" | $USER_B_CONNECT +echo "drop table t2" | $BENDSQL_CLIENT_CONNECT +echo "create table t2 as select * from @s3" | $USER_B_CONNECT +echo "copy into t from (select * from @s3);" | $USER_B_CONNECT +echo "replace into t on(id) select * from t1;" | $USER_B_CONNECT ## Drop user echo "drop user a" | $BENDSQL_CLIENT_CONNECT +echo "drop user b" | $BENDSQL_CLIENT_CONNECT echo "drop database if exists no_grant" | $BENDSQL_CLIENT_CONNECT echo "drop database grant_db" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t2" | $BENDSQL_CLIENT_CONNECT +echo "drop stage if exists s3;" | $BENDSQL_CLIENT_CONNECT + echo "unset enable_experimental_rbac_check" | $BENDSQL_CLIENT_CONNECT diff --git a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result index fb5805dd5c1b..4d6611dc26b2 100644 --- a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result +++ b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.result @@ -27,3 +27,4 @@ Error: APIError: ResponseError with 1063: Permission denied, privilege READ is r 1 1 2 2 === check access user's local stage === +Error: APIError: ResponseError with 1063: Permission denied, privilege [Select] is required on 'default'.'system'.'stage' for user 'b'@'%' with roles [public] diff --git a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.sh b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.sh index 2931c518857c..ad6e9286a705 100755 --- a/tests/suites/1_stateful/00_stage/00_0012_stage_priv.sh +++ b/tests/suites/1_stateful/00_stage/00_0012_stage_priv.sh @@ -6,6 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) export TEST_USER_NAME="u1" export TEST_USER_PASSWORD="password" export TEST_USER_CONNECT="bendsql --user=u1 --password=password --host=${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT}" +export USER_B_CONNECT="bendsql --user=b --password=password --host=${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT}" echo "set global enable_experimental_rbac_check=1" | $BENDSQL_CLIENT_CONNECT @@ -120,6 +121,28 @@ echo "drop stage if exists presign_stage" | $BENDSQL_CLIENT_CONNECT echo "drop stage if exists s3" | $BENDSQL_CLIENT_CONNECT echo "drop user u1" | $BENDSQL_CLIENT_CONNECT echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT + +echo "drop user if exists b" | $BENDSQL_CLIENT_CONNECT +echo "create user b identified by '$TEST_USER_PASSWORD'" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT +echo "create table t(id int)" | $BENDSQL_CLIENT_CONNECT + 
+echo "grant insert, delete on default.t to b" | $BENDSQL_CLIENT_CONNECT + +cat << EOF > /tmp/00_0012/i1.csv +1 +2 +EOF + +#err: need select privilege on system.stage +echo "insert into t select \$1 from 'fs:///tmp/00_0020/' (FILE_FORMAT => 'CSV');" | $USER_B_CONNECT + +echo "grant select on system.* to b" | $BENDSQL_CLIENT_CONNECT + +echo "insert into t select \$1 from 'fs:///tmp/00_0020/' (FILE_FORMAT => 'CSV');" | $USER_B_CONNECT + +echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT +echo "drop user if exists b" | $BENDSQL_CLIENT_CONNECT rm -rf /tmp/00_0012 echo "unset enable_experimental_rbac_check" | $BENDSQL_CLIENT_CONNECT From ed2e2f2ff4abf121c3c10af5eff2b6f967ffc689 Mon Sep 17 00:00:00 2001 From: RinChanNOW Date: Tue, 5 Dec 2023 11:08:15 +0800 Subject: [PATCH 11/16] chore: push down sort pipeline to cluster nodes. (#13881) * chore: push down sort pipeline to cluster nodes. * fix pipeline. * hack exchange pipeline building for distributed sort. * Fix tests. * Deal with the case if `max_threads` = 1. * Deal with the case if max_threads = 1. --- src/query/expression/src/block.rs | 16 +-- .../src/processors/transforms/mod.rs | 1 + .../processors/transforms/sort/rows/common.rs | 2 +- .../processors/transforms/sort/rows/mod.rs | 2 +- .../processors/transforms/sort/rows/simple.rs | 2 +- .../transforms/transform_multi_sort_merge.rs | 106 +++++++++----- .../processors/transforms/transform_sort.rs | 57 +++++--- .../transforms/transform_sort_merge.rs | 135 +++++++++++++----- .../transforms/transform_sort_merge_limit.rs | 108 ++++++++++---- .../src/api/rpc/exchange/exchange_source.rs | 1 + .../pipelines/builders/builder_recluster.rs | 2 + .../src/pipelines/builders/builder_sort.rs | 116 +++++++++++---- .../src/pipelines/builders/builder_window.rs | 2 +- .../physical_plans/physical_exchange.rs | 7 +- .../executor/physical_plans/physical_sort.rs | 67 +++++++-- src/query/sql/src/planner/binder/sort.rs | 4 +- .../planner/format/display_rel_operator.rs | 3 + .../optimizer/distributed/distributed.rs | 6 +- .../src/planner/optimizer/distributed/mod.rs | 2 +- .../{topn.rs => sort_and_limit.rs} | 71 ++++----- src/query/sql/src/planner/plans/exchange.rs | 2 + src/query/sql/src/planner/plans/sort.rs | 3 +- .../mutator/update_by_expr_mutator.rs | 2 +- .../operations/mutation/mutation_source.rs | 2 +- .../fuse/src/statistics/cluster_statistics.rs | 6 +- .../mode/cluster/04_0002_explain_v2.test | 12 +- ...ibuted_topn.test => distributed_sort.test} | 58 ++++++-- .../suites/mode/cluster/lazy_read.test | 8 +- 28 files changed, 535 insertions(+), 268 deletions(-) rename src/query/sql/src/planner/optimizer/distributed/{topn.rs => sort_and_limit.rs} (74%) rename tests/sqllogictests/suites/mode/cluster/{distributed_topn.test => distributed_sort.test} (59%) diff --git a/src/query/expression/src/block.rs b/src/query/expression/src/block.rs index b36ebece129f..c6b6cc9ef10c 100644 --- a/src/query/expression/src/block.rs +++ b/src/query/expression/src/block.rs @@ -327,19 +327,9 @@ impl DataBlock { } #[inline] - pub fn pop_columns(self, num: usize) -> Result { - let mut columns = self.columns.clone(); - let len = columns.len(); - - for _ in 0..num.min(len) { - columns.pop().unwrap(); - } - - Ok(Self { - columns, - num_rows: self.num_rows, - meta: self.meta, - }) + pub fn pop_columns(&mut self, num: usize) { + debug_assert!(num <= self.columns.len()); + self.columns.truncate(self.columns.len() - num); } /// Resort the columns according to the schema. 
diff --git a/src/query/pipeline/transforms/src/processors/transforms/mod.rs b/src/query/pipeline/transforms/src/processors/transforms/mod.rs index 2e6b3f9fabcb..7373b394e855 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/mod.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/mod.rs @@ -37,6 +37,7 @@ pub use transform_block_compact_for_copy::*; pub use transform_blocking::*; pub use transform_compact::*; pub use transform_dummy::*; +pub use transform_multi_sort_merge::try_add_multi_sort_merge; pub use transform_sort::*; pub use transform_sort_merge::sort_merge; pub use transform_sort_partial::*; diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs index eacc3c7bb256..80ec344e25cd 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs @@ -53,7 +53,7 @@ impl Rows for StringColumn { impl RowConverter for CommonRowConverter { fn create( - sort_columns_descriptions: Vec, + sort_columns_descriptions: &[SortColumnDescription], output_schema: DataSchemaRef, ) -> Result { let sort_fields = sort_columns_descriptions diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs index 4ccafe7782f9..be3d0a4dc05f 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs @@ -29,7 +29,7 @@ pub trait RowConverter where Self: Sized { fn create( - sort_columns_descriptions: Vec, + sort_columns_descriptions: &[SortColumnDescription], output_schema: DataSchemaRef, ) -> Result; fn convert(&mut self, columns: &[BlockEntry], num_rows: usize) -> Result; diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs index ae31d1711de6..c9223bed9b56 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs @@ -129,7 +129,7 @@ where T::Scalar: Ord, { fn create( - sort_columns_descriptions: Vec, + sort_columns_descriptions: &[SortColumnDescription], _: DataSchemaRef, ) -> Result { assert!(sort_columns_descriptions.len() == 1); diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_multi_sort_merge.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_multi_sort_merge.rs index bfefe132f53e..804880ff2055 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_multi_sort_merge.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_multi_sort_merge.rs @@ -39,10 +39,12 @@ use common_pipeline_core::processors::ProcessorPtr; use common_pipeline_core::Pipe; use common_pipeline_core::PipeItem; use common_pipeline_core::Pipeline; +use common_profile::SharedProcessorProfiles; use super::sort::Cursor; use super::sort::Rows; use super::sort::SimpleRows; +use crate::processors::ProcessorProfileWrapper; pub fn try_add_multi_sort_merge( pipeline: &mut Pipeline, @@ -50,6 +52,8 @@ pub fn try_add_multi_sort_merge( block_size: usize, limit: Option, sort_columns_descriptions: Vec, + prof_info: Option<(u32, SharedProcessorProfiles)>, + remove_order_col: bool, ) -> Result<()> { if 
pipeline.is_empty() { return Err(ErrorCode::Internal("Cannot resize empty pipe.")); @@ -71,8 +75,19 @@ pub fn try_add_multi_sort_merge( block_size, limit, sort_columns_descriptions, + remove_order_col, )?; + let processor = if let Some((plan_id, prof)) = &prof_info { + ProcessorPtr::create(ProcessorProfileWrapper::create( + processor, + *plan_id, + prof.clone(), + )) + } else { + ProcessorPtr::create(processor) + }; + pipeline.add_pipe(Pipe::create(inputs_port.len(), 1, vec![PipeItem::create( processor, inputs_port, @@ -91,67 +106,71 @@ fn create_processor( block_size: usize, limit: Option, sort_columns_descriptions: Vec, -) -> Result { + remove_order_col: bool, +) -> Result> { Ok(if sort_columns_descriptions.len() == 1 { let sort_type = input_schema .field(sort_columns_descriptions[0].offset) .data_type(); match sort_type { DataType::Number(num_ty) => with_number_mapped_type!(|NUM_TYPE| match num_ty { - NumberDataType::NUM_TYPE => - ProcessorPtr::create(Box::new(MultiSortMergeProcessor::< - SimpleRows>, - >::create( - inputs, - output, - block_size, - limit, - sort_columns_descriptions, - )?)), + NumberDataType::NUM_TYPE => Box::new(MultiSortMergeProcessor::< + SimpleRows>, + >::create( + inputs, + output, + block_size, + limit, + sort_columns_descriptions, + remove_order_col, + )?), }), - DataType::Date => ProcessorPtr::create(Box::new(MultiSortMergeProcessor::< - SimpleRows, - >::create( + DataType::Date => Box::new(MultiSortMergeProcessor::>::create( inputs, output, block_size, limit, sort_columns_descriptions, - )?)), - DataType::Timestamp => ProcessorPtr::create(Box::new(MultiSortMergeProcessor::< - SimpleRows, - >::create( - inputs, - output, - block_size, - limit, - sort_columns_descriptions, - )?)), - DataType::String => ProcessorPtr::create(Box::new(MultiSortMergeProcessor::< - SimpleRows, - >::create( - inputs, - output, - block_size, - limit, - sort_columns_descriptions, - )?)), - _ => ProcessorPtr::create(Box::new(MultiSortMergeProcessor::::create( + remove_order_col, + )?), + DataType::Timestamp => Box::new( + MultiSortMergeProcessor::>::create( + inputs, + output, + block_size, + limit, + sort_columns_descriptions, + remove_order_col, + )?, + ), + DataType::String => { + Box::new(MultiSortMergeProcessor::>::create( + inputs, + output, + block_size, + limit, + sort_columns_descriptions, + remove_order_col, + )?) + } + _ => Box::new(MultiSortMergeProcessor::::create( inputs, output, block_size, limit, sort_columns_descriptions, - )?)), + remove_order_col, + )?), } } else { - ProcessorPtr::create(Box::new(MultiSortMergeProcessor::::create( + Box::new(MultiSortMergeProcessor::::create( inputs, output, block_size, limit, sort_columns_descriptions, - )?)) + remove_order_col, + )?) }) } @@ -168,6 +187,11 @@ where R: Rows // Parameters block_size: usize, limit: Option, + /// Indicate if we need to remove the order column. + /// In cluster sorting, the final processor on the cluster node will be [`MultiSortMergeProcessor`], + /// and the first processor on the coordinator node will be it, too. + /// Therefore, we don't need to remove the order column if it's a cluster node. + remove_order_col: bool, /// For each input port, maintain a dequeue of data blocks. 
blocks: Vec>, @@ -195,6 +219,7 @@ where R: Rows block_size: usize, limit: Option, sort_desc: Vec, + remove_order_col: bool, ) -> Result { let input_size = inputs.len(); Ok(Self { @@ -203,6 +228,7 @@ where R: Rows sort_desc, block_size, limit, + remove_order_col, blocks: vec![VecDeque::with_capacity(2); input_size], heap: BinaryHeap::with_capacity(input_size), in_progress_rows: vec![], @@ -484,7 +510,7 @@ where R: Rows + Send + 'static if block.is_empty() { continue; } - let block = block.convert_to_full(); + let mut block = block.convert_to_full(); let order_col = block .columns() .last() @@ -497,7 +523,9 @@ where R: Rows + Send + 'static ErrorCode::BadDataValueType("Order column type mismatched.") })?; // Remove the order column - let block = block.pop_columns(1)?; + if self.remove_order_col { + block.pop_columns(1); + } let cursor = Cursor::new(input_index, rows); self.heap.push(Reverse(cursor)); self.cursor_finished[input_index] = false; diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort.rs index 60db5c8e38a6..c96851c7f389 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_sort.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort.rs @@ -34,25 +34,21 @@ pub fn build_full_sort_pipeline( partial_block_size: usize, final_block_size: usize, prof_info: Option<(u32, SharedProcessorProfiles)>, - after_exchange: bool, + remove_order_col_at_last: bool, ) -> Result<()> { // Partial sort - if limit.is_none() || !after_exchange { - // If the sort plan is after an exchange plan, the blocks are already partially sorted on other nodes. - pipeline.add_transform(|input, output| { - let transform = - TransformSortPartial::try_create(input, output, limit, sort_desc.clone())?; - if let Some((plan_id, prof)) = &prof_info { - Ok(ProcessorPtr::create(ProcessorProfileWrapper::create( - transform, - *plan_id, - prof.clone(), - ))) - } else { - Ok(ProcessorPtr::create(transform)) - } - })?; - } + pipeline.add_transform(|input, output| { + let transform = TransformSortPartial::try_create(input, output, limit, sort_desc.clone())?; + if let Some((plan_id, prof)) = &prof_info { + Ok(ProcessorPtr::create(ProcessorProfileWrapper::create( + transform, + *plan_id, + prof.clone(), + ))) + } else { + Ok(ProcessorPtr::create(transform)) + } + })?; build_merge_sort_pipeline( pipeline, @@ -62,9 +58,12 @@ pub fn build_full_sort_pipeline( partial_block_size, final_block_size, prof_info, + false, + remove_order_col_at_last, ) } +#[allow(clippy::too_many_arguments)] pub fn build_merge_sort_pipeline( pipeline: &mut Pipeline, input_schema: DataSchemaRef, @@ -73,9 +72,17 @@ pub fn build_merge_sort_pipeline( partial_block_size: usize, final_block_size: usize, prof_info: Option<(u32, SharedProcessorProfiles)>, + order_col_generated: bool, + remove_order_col_at_last: bool, ) -> Result<()> { // Merge sort let need_multi_merge = pipeline.output_len() > 1; + debug_assert!(if order_col_generated { + // If `order_col_generated`, it means this transform is the last processor in the distributed sort pipeline. 
+ !need_multi_merge && remove_order_col_at_last + } else { + true + }); pipeline.add_transform(|input, output| { let transform = match limit { Some(limit) => try_create_transform_sort_merge_limit( @@ -85,7 +92,8 @@ pub fn build_merge_sort_pipeline( sort_desc.clone(), partial_block_size, limit, - need_multi_merge, + order_col_generated, + need_multi_merge || !remove_order_col_at_last, )?, _ => try_create_transform_sort_merge( input, @@ -93,7 +101,8 @@ pub fn build_merge_sort_pipeline( input_schema.clone(), partial_block_size, sort_desc.clone(), - need_multi_merge, + order_col_generated, + need_multi_merge || !remove_order_col_at_last, )?, }; @@ -110,7 +119,15 @@ pub fn build_merge_sort_pipeline( if need_multi_merge { // Multi-pipelines merge sort - try_add_multi_sort_merge(pipeline, input_schema, final_block_size, limit, sort_desc)?; + try_add_multi_sort_merge( + pipeline, + input_schema, + final_block_size, + limit, + sort_desc, + prof_info.clone(), + remove_order_col_at_last, + )?; } Ok(()) diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs index dd5808971557..708da25531d3 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs @@ -54,13 +54,18 @@ use super::TransformCompact; pub struct SortMergeCompactor { block_size: usize, row_converter: Converter, - order_by_cols: Vec, + sort_desc: Vec, aborting: Arc, /// If the next transform of current transform is [`super::transform_multi_sort_merge::MultiSortMergeProcessor`], - /// we can generate the order column to avoid the extra converting in the next transform. - gen_order_col: bool, + /// we can generate and output the order column to avoid the extra converting in the next transform. + output_order_col: bool, + /// If this transform is after an Exchange transform, + /// it means it will compact the data from cluster nodes. + /// And the order column is already generated in each cluster node, + /// so we don't need to generate the order column again. + order_col_generated: bool, _c: PhantomData, _r: PhantomData, @@ -75,16 +80,26 @@ where schema: DataSchemaRef, block_size: usize, sort_desc: Vec, - gen_order_col: bool, + order_col_generated: bool, + output_order_col: bool, ) -> Result { - let order_by_cols = sort_desc.iter().map(|i| i.offset).collect::>(); - let row_converter = Converter::create(sort_desc, schema)?; + debug_assert!(if order_col_generated { + // If the order column is already generated, + // it means this transform is after a exchange source and it's the last transform for sorting. + // We should remove the order column. + !output_order_col + } else { + true + }); + + let row_converter = Converter::create(&sort_desc, schema)?; Ok(SortMergeCompactor { - order_by_cols, row_converter, block_size, + sort_desc, aborting: Arc::new(AtomicBool::new(false)), - gen_order_col, + order_col_generated, + output_order_col, _c: PhantomData, _r: PhantomData, }) @@ -120,19 +135,26 @@ where .collect::>(); if blocks.len() == 1 { - if self.gen_order_col { - let block = blocks.get_mut(0).ok_or(ErrorCode::Internal("It's a bug"))?; + let block = blocks.get_mut(0).ok_or(ErrorCode::Internal("It's a bug"))?; + if self.order_col_generated { + // Need to remove order column. 
+ block.pop_columns(1); + return Ok(blocks); + } + if self.output_order_col { let columns = self - .order_by_cols + .sort_desc .iter() - .map(|i| block.get_by_offset(*i).clone()) + .map(|d| block.get_by_offset(d.offset).clone()) .collect::>(); let rows = self.row_converter.convert(&columns, block.num_rows())?; let order_col = rows.to_column(); - block.add_column(BlockEntry { - data_type: order_col.data_type(), - value: Value::Column(order_col), - }); + if self.output_order_col { + block.add_column(BlockEntry { + data_type: order_col.data_type(), + value: Value::Column(order_col), + }); + } } return Ok(blocks); } @@ -144,20 +166,36 @@ where // 1. Put all blocks into a min-heap. for (i, block) in blocks.iter_mut().enumerate() { - let columns = self - .order_by_cols - .iter() - .map(|i| block.get_by_offset(*i).clone()) - .collect::>(); - let rows = self.row_converter.convert(&columns, block.num_rows())?; - - if self.gen_order_col { - let order_col = rows.to_column(); - block.add_column(BlockEntry { - data_type: order_col.data_type(), - value: Value::Column(order_col), - }); - } + let rows = if self.order_col_generated { + let order_col = block + .columns() + .last() + .unwrap() + .value + .as_column() + .unwrap() + .clone(); + let rows = R::from_column(order_col, &self.sort_desc) + .ok_or_else(|| ErrorCode::BadDataValueType("Order column type mismatched."))?; + // Need to remove order column. + block.pop_columns(1); + rows + } else { + let columns = self + .sort_desc + .iter() + .map(|d| block.get_by_offset(d.offset).clone()) + .collect::>(); + let rows = self.row_converter.convert(&columns, block.num_rows())?; + if self.output_order_col { + let order_col = rows.to_column(); + block.add_column(BlockEntry { + data_type: order_col.data_type(), + value: Value::Column(order_col), + }); + } + rows + }; let cursor = Cursor::new(i, rows); heap.push(Reverse(cursor)); } @@ -246,7 +284,8 @@ pub fn try_create_transform_sort_merge( output_schema: DataSchemaRef, block_size: usize, sort_desc: Vec, - gen_order_col: bool, + order_col_generated: bool, + output_order_col: bool, ) -> Result> { if sort_desc.len() == 1 { let sort_type = output_schema.field(sort_desc[0].offset).data_type(); @@ -264,7 +303,11 @@ pub fn try_create_transform_sort_merge( SimpleRows>, SimpleRowConverter>, >::try_create( - output_schema, block_size, sort_desc, gen_order_col + output_schema, + block_size, + sort_desc, + order_col_generated, + output_order_col )? 
), }), @@ -275,7 +318,8 @@ pub fn try_create_transform_sort_merge( output_schema, block_size, sort_desc, - gen_order_col, + order_col_generated, + output_order_col, )?, ), DataType::Timestamp => SimpleTimestampSort::try_create( @@ -285,7 +329,8 @@ pub fn try_create_transform_sort_merge( output_schema, block_size, sort_desc, - gen_order_col, + order_col_generated, + output_order_col, )?, ), DataType::String => SimpleStringSort::try_create( @@ -295,20 +340,33 @@ pub fn try_create_transform_sort_merge( output_schema, block_size, sort_desc, - gen_order_col, + order_col_generated, + output_order_col, )?, ), _ => CommonSort::try_create( input, output, - CommonCompactor::try_create(output_schema, block_size, sort_desc, gen_order_col)?, + CommonCompactor::try_create( + output_schema, + block_size, + sort_desc, + order_col_generated, + output_order_col, + )?, ), } } else { CommonSort::try_create( input, output, - CommonCompactor::try_create(output_schema, block_size, sort_desc, gen_order_col)?, + CommonCompactor::try_create( + output_schema, + block_size, + sort_desc, + order_col_generated, + output_order_col, + )?, ) } } @@ -319,6 +377,7 @@ pub fn sort_merge( sort_desc: Vec, data_blocks: Vec, ) -> Result> { - let mut compactor = CommonCompactor::try_create(data_schema, block_size, sort_desc, false)?; + let mut compactor = + CommonCompactor::try_create(data_schema, block_size, sort_desc, false, false)?; compactor.compact_final(data_blocks) } diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs index 7ba49fba89e4..21af08fc568e 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs @@ -17,6 +17,7 @@ use std::collections::HashMap; use std::sync::Arc; use common_base::containers::FixedHeap; +use common_exception::ErrorCode; use common_exception::Result; use common_expression::row::RowConverter as CommonRowConverter; use common_expression::types::string::StringColumn; @@ -44,17 +45,21 @@ use super::sort::SimpleRows; use super::AccumulatingTransform; use super::AccumulatingTransformer; +/// This is a specific version of [`super::transform_sort_merge::SortMergeCompactor`] which sort blocks with limit. +/// +/// Definitions of some same fields can be found in [`super::transform_sort_merge::SortMergeCompactor`]. 
pub struct TransformSortMergeLimit { row_converter: Converter, - order_by_cols: Vec, heap: FixedHeap>>>, + sort_desc: Vec, buffer: HashMap, cur_index: usize, block_size: usize, - gen_order_col: bool, + order_col_generated: bool, + output_order_col: bool, } impl TransformSortMergeLimit @@ -67,18 +72,25 @@ where sort_desc: Vec, block_size: usize, limit: usize, - gen_order_col: bool, + order_col_generated: bool, + output_order_col: bool, ) -> Result { - let order_by_cols = sort_desc.iter().map(|i| i.offset).collect::>(); - let row_converter = Converter::create(sort_desc, schema)?; + debug_assert!(if order_col_generated { + !output_order_col + } else { + true + }); + + let row_converter = Converter::create(&sort_desc, schema)?; Ok(TransformSortMergeLimit { row_converter, - order_by_cols, + sort_desc, heap: FixedHeap::new(limit), buffer: HashMap::with_capacity(limit), block_size, cur_index: 0, - gen_order_col, + order_col_generated, + output_order_col, }) } } @@ -100,23 +112,39 @@ where return Ok(vec![]); } - let order_by_cols = self - .order_by_cols - .iter() - .map(|i| data.get_by_offset(*i).clone()) - .collect::>(); - let rows = Arc::new( - self.row_converter - .convert(&order_by_cols, data.num_rows())?, - ); - - if self.gen_order_col { - let order_col = rows.to_column(); - data.add_column(BlockEntry { - data_type: order_col.data_type(), - value: Value::Column(order_col), - }); - } + let rows = if self.order_col_generated { + let order_col = data + .columns() + .last() + .unwrap() + .value + .as_column() + .unwrap() + .clone(); + let rows = R::from_column(order_col, &self.sort_desc) + .ok_or_else(|| ErrorCode::BadDataValueType("Order column type mismatched."))?; + // Need to remove order column. + data.pop_columns(1); + Arc::new(rows) + } else { + let order_by_cols = self + .sort_desc + .iter() + .map(|d| data.get_by_offset(d.offset).clone()) + .collect::>(); + let rows = Arc::new( + self.row_converter + .convert(&order_by_cols, data.num_rows())?, + ); + if self.output_order_col { + let order_col = rows.to_column(); + data.add_column(BlockEntry { + data_type: order_col.data_type(), + value: Value::Column(order_col), + }); + } + rows + }; let mut cursor = Cursor::new(self.cur_index, rows); self.buffer.insert(self.cur_index, data); @@ -200,6 +228,7 @@ type SimpleStringSort = AccumulatingTransformer; type CommonTransform = TransformSortMergeLimit; type CommonSort = AccumulatingTransformer; +#[allow(clippy::too_many_arguments)] pub fn try_create_transform_sort_merge_limit( input: Arc, output: Arc, @@ -207,7 +236,8 @@ pub fn try_create_transform_sort_merge_limit( sort_desc: Vec, block_size: usize, limit: usize, - gen_order_col: bool, + order_col_generated: bool, + output_order_col: bool, ) -> Result> { Ok(if sort_desc.len() == 1 { let sort_type = input_schema.field(sort_desc[0].offset).data_type(); @@ -225,7 +255,12 @@ pub fn try_create_transform_sort_merge_limit( SimpleRows>, SimpleRowConverter>, >::try_create( - input_schema, sort_desc, block_size, limit, gen_order_col + input_schema, + sort_desc, + block_size, + limit, + order_col_generated, + output_order_col )? 
), }), @@ -237,7 +272,8 @@ pub fn try_create_transform_sort_merge_limit( sort_desc, block_size, limit, - gen_order_col, + order_col_generated, + output_order_col, )?, ), DataType::Timestamp => SimpleTimestampSort::create( @@ -248,7 +284,8 @@ pub fn try_create_transform_sort_merge_limit( sort_desc, block_size, limit, - gen_order_col, + order_col_generated, + output_order_col, )?, ), DataType::String => SimpleStringSort::create( @@ -259,7 +296,8 @@ pub fn try_create_transform_sort_merge_limit( sort_desc, block_size, limit, - gen_order_col, + order_col_generated, + output_order_col, )?, ), _ => CommonSort::create( @@ -270,7 +308,8 @@ pub fn try_create_transform_sort_merge_limit( sort_desc, block_size, limit, - gen_order_col, + order_col_generated, + output_order_col, )?, ), } @@ -278,7 +317,14 @@ pub fn try_create_transform_sort_merge_limit( CommonSort::create( input, output, - CommonTransform::try_create(input_schema, sort_desc, block_size, limit, gen_order_col)?, + CommonTransform::try_create( + input_schema, + sort_desc, + block_size, + limit, + order_col_generated, + output_order_col, + )?, ) }) } diff --git a/src/query/service/src/api/rpc/exchange/exchange_source.rs b/src/query/service/src/api/rpc/exchange/exchange_source.rs index aab9466c0eaf..bed2901ae3e1 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_source.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_source.rs @@ -31,6 +31,7 @@ use crate::api::ExchangeInjector; use crate::clusters::ClusterHelper; use crate::sessions::QueryContext; +/// Add Exchange Source to the pipeline. pub fn via_exchange_source( ctx: Arc, params: &MergeExchangeParams, diff --git a/src/query/service/src/pipelines/builders/builder_recluster.rs b/src/query/service/src/pipelines/builders/builder_recluster.rs index 9be90a3fb33b..47732a6e133e 100644 --- a/src/query/service/src/pipelines/builders/builder_recluster.rs +++ b/src/query/service/src/pipelines/builders/builder_recluster.rs @@ -170,6 +170,8 @@ impl PipelineBuilder { partial_block_size, final_block_size, None, + false, + true, )?; let output_block_num = task.total_rows.div_ceil(final_block_size); diff --git a/src/query/service/src/pipelines/builders/builder_sort.rs b/src/query/service/src/pipelines/builders/builder_sort.rs index cba7ff4ef38f..652721435df3 100644 --- a/src/query/service/src/pipelines/builders/builder_sort.rs +++ b/src/query/service/src/pipelines/builders/builder_sort.rs @@ -17,6 +17,8 @@ use common_expression::DataSchemaRef; use common_expression::SortColumnDescription; use common_pipeline_core::processors::ProcessorPtr; use common_pipeline_transforms::processors::build_full_sort_pipeline; +use common_pipeline_transforms::processors::build_merge_sort_pipeline; +use common_pipeline_transforms::processors::try_add_multi_sort_merge; use common_sql::evaluator::BlockOperator; use common_sql::evaluator::CompoundBlockOperator; use common_sql::executor::physical_plans::Sort; @@ -24,31 +26,36 @@ use common_sql::executor::physical_plans::Sort; use crate::pipelines::PipelineBuilder; impl PipelineBuilder { + // The pipeline graph of distributed sort can be found in https://github.com/datafuselabs/databend/pull/13881 pub(crate) fn build_sort(&mut self, sort: &Sort) -> Result<()> { self.build_pipeline(&sort.input)?; let input_schema = sort.input.output_schema()?; - if let Some(proj) = &sort.pre_projection { - // Do projection to reduce useless data copying during sorting. 
- let projection = proj - .iter() - .filter_map(|i| input_schema.index_of(&i.to_string()).ok()) - .collect::>(); + if !matches!(sort.after_exchange, Some(true)) { + // If the Sort plan is after exchange, we don't need to do a projection, + // because the data is already projected in each cluster node. + if let Some(proj) = &sort.pre_projection { + // Do projection to reduce useless data copying during sorting. + let projection = proj + .iter() + .filter_map(|i| input_schema.index_of(&i.to_string()).ok()) + .collect::>(); - if projection.len() < input_schema.fields().len() { - // Only if the projection is not a full projection, we need to add a projection transform. - self.main_pipeline.add_transform(|input, output| { - Ok(ProcessorPtr::create(CompoundBlockOperator::create( - input, - output, - input_schema.num_fields(), - self.func_ctx.clone(), - vec![BlockOperator::Project { - projection: projection.clone(), - }], - ))) - })?; + if projection.len() < input_schema.fields().len() { + // Only if the projection is not a full projection, we need to add a projection transform. + self.main_pipeline.add_transform(|input, output| { + Ok(ProcessorPtr::create(CompoundBlockOperator::create( + input, + output, + input_schema.num_fields(), + self.func_ctx.clone(), + vec![BlockOperator::Project { + projection: projection.clone(), + }], + ))) + })?; + } } } @@ -83,7 +90,7 @@ impl PipelineBuilder { sort_desc: Vec, plan_id: u32, limit: Option, - after_exchange: bool, + after_exchange: Option, ) -> Result<()> { let block_size = self.settings.get_max_block_size()? as usize; let max_threads = self.settings.get_max_threads()? as usize; @@ -98,15 +105,64 @@ impl PipelineBuilder { None }; - build_full_sort_pipeline( - &mut self.main_pipeline, - input_schema, - sort_desc, - limit, - block_size, - block_size, - prof_info, - after_exchange, - ) + match after_exchange { + Some(true) => { + // Build for the coordinator node. + // We only build a `MultiSortMergeTransform`, + // as the data is already sorted in each cluster node. + // The input number of the transform is equal to the number of cluster nodes. + if self.main_pipeline.output_len() > 1 { + try_add_multi_sort_merge( + &mut self.main_pipeline, + input_schema, + block_size, + limit, + sort_desc, + prof_info, + true, + ) + } else { + build_merge_sort_pipeline( + &mut self.main_pipeline, + input_schema, + sort_desc, + limit, + block_size, + block_size, + prof_info, + true, + true, + ) + } + } + Some(false) => { + // Build for each cluster node. + // We build the full sort pipeline for it. + build_full_sort_pipeline( + &mut self.main_pipeline, + input_schema, + sort_desc, + limit, + block_size, + block_size, + prof_info, + false, + ) + } + None => { + // Build for single node mode. + // We build the full sort pipeline for it. 
+ build_full_sort_pipeline( + &mut self.main_pipeline, + input_schema, + sort_desc, + limit, + block_size, + block_size, + prof_info, + true, + ) + } + } } } diff --git a/src/query/service/src/pipelines/builders/builder_window.rs b/src/query/service/src/pipelines/builders/builder_window.rs index 6f6b5e7f277d..3e461cf3b845 100644 --- a/src/query/service/src/pipelines/builders/builder_window.rs +++ b/src/query/service/src/pipelines/builders/builder_window.rs @@ -70,7 +70,7 @@ impl PipelineBuilder { sort_desc.extend(order_by.clone()); - self.build_sort_pipeline(input_schema.clone(), sort_desc, window.plan_id, None, false)?; + self.build_sort_pipeline(input_schema.clone(), sort_desc, window.plan_id, None, None)?; } // `TransformWindow` is a pipeline breaker. self.main_pipeline.try_resize(1)?; diff --git a/src/query/sql/src/executor/physical_plans/physical_exchange.rs b/src/query/sql/src/executor/physical_plans/physical_exchange.rs index eaaf0f652db7..083aa4de0ffd 100644 --- a/src/query/sql/src/executor/physical_plans/physical_exchange.rs +++ b/src/query/sql/src/executor/physical_plans/physical_exchange.rs @@ -61,6 +61,7 @@ impl PhysicalPlanBuilder { let input = Box::new(self.build(s_expr.child(0)?, required).await?); let input_schema = input.output_schema()?; let mut keys = vec![]; + let mut allow_adjust_parallelism = true; let kind = match exchange { crate::plans::Exchange::Random => FragmentKind::Init, crate::plans::Exchange::Hash(scalars) => { @@ -77,13 +78,17 @@ impl PhysicalPlanBuilder { } crate::plans::Exchange::Broadcast => FragmentKind::Expansive, crate::plans::Exchange::Merge => FragmentKind::Merge, + crate::plans::Exchange::MergeSort => { + allow_adjust_parallelism = false; + FragmentKind::Merge + } }; Ok(PhysicalPlan::Exchange(Exchange { plan_id: self.next_plan_id(), input, kind, keys, - allow_adjust_parallelism: true, + allow_adjust_parallelism, ignore_exchange: false, })) } diff --git a/src/query/sql/src/executor/physical_plans/physical_sort.rs b/src/query/sql/src/executor/physical_plans/physical_sort.rs index 495a4c48a55a..acb15d7b57a4 100644 --- a/src/query/sql/src/executor/physical_plans/physical_sort.rs +++ b/src/query/sql/src/executor/physical_plans/physical_sort.rs @@ -13,6 +13,9 @@ // limitations under the License. use common_exception::Result; +use common_expression::types::DataType; +use common_expression::DataField; +use common_expression::DataSchema; use common_expression::DataSchemaRef; use common_expression::DataSchemaRefExt; use itertools::Itertools; @@ -27,14 +30,15 @@ use crate::IndexType; #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct Sort { - // A unique id of operator in a `PhysicalPlan` tree, only used for display. + /// A unique id of operator in a `PhysicalPlan` tree, only used for display. pub plan_id: u32, pub input: Box, pub order_by: Vec, - // limit = Limit.limit + Limit.offset + /// limit = Limit.limit + Limit.offset pub limit: Option, - // If the sort plan is after the exchange plan - pub after_exchange: bool, + /// If the sort plan is after the exchange plan. + /// It's [None] if the sorting plan is in single node mode. 
+ pub after_exchange: Option, pub pre_projection: Option>, // Only used for explain @@ -42,20 +46,55 @@ pub struct Sort { } impl Sort { + fn order_col_type(&self, schema: &DataSchema) -> Result { + if self.order_by.len() == 1 { + let order_by_field = schema.field_with_name(&self.order_by[0].order_by.to_string())?; + if matches!( + order_by_field.data_type(), + DataType::Number(_) | DataType::Date | DataType::Timestamp | DataType::String + ) { + return Ok(order_by_field.data_type().clone()); + } + } + Ok(DataType::String) + } + pub fn output_schema(&self) -> Result { let input_schema = self.input.output_schema()?; - if let Some(proj) = &self.pre_projection { - let fields = proj - .iter() - .filter_map(|index| input_schema.field_with_name(&index.to_string()).ok()) - .cloned() - .collect::>(); - if fields.len() < input_schema.fields().len() { - // Only if the projection is not a full projection, we need to add a projection transform. - return Ok(DataSchemaRefExt::create(fields)); + let mut fields = input_schema.fields().clone(); + if matches!(self.after_exchange, Some(true)) { + // If the plan is after exchange plan in cluster mode, + // the order column is at the last of the input schema. + debug_assert_eq!(fields.last().unwrap().name(), "_order_col"); + debug_assert_eq!( + fields.last().unwrap().data_type(), + &self.order_col_type(&input_schema)? + ); + fields.pop(); + } else { + if let Some(proj) = &self.pre_projection { + let fileted_fields = proj + .iter() + .filter_map(|index| input_schema.field_with_name(&index.to_string()).ok()) + .cloned() + .collect::>(); + if fileted_fields.len() < fields.len() { + // Only if the projection is not a full projection, we need to add a projection transform. + fields = fileted_fields + } + } + + if matches!(self.after_exchange, Some(false)) { + // If the plan is before exchange plan in cluster mode, + // the order column should be added to the output schema. 
+ fields.push(DataField::new( + "_order_col", + self.order_col_type(&input_schema)?, + )); } } - Ok(input_schema) + + Ok(DataSchemaRefExt::create(fields)) } } diff --git a/src/query/sql/src/planner/binder/sort.rs b/src/query/sql/src/planner/binder/sort.rs index 37404577aa13..41150fca49b6 100644 --- a/src/query/sql/src/planner/binder/sort.rs +++ b/src/query/sql/src/planner/binder/sort.rs @@ -244,7 +244,7 @@ impl Binder { let sort_plan = Sort { items: order_by_items, limit: None, - after_exchange: false, + after_exchange: None, pre_projection: None, }; new_expr = SExpr::create_unary(Arc::new(sort_plan.into()), Arc::new(new_expr)); @@ -299,7 +299,7 @@ impl Binder { let sort_plan = Sort { items: order_by_items, limit: None, - after_exchange: false, + after_exchange: None, pre_projection: None, }; Ok(SExpr::create_unary( diff --git a/src/query/sql/src/planner/format/display_rel_operator.rs b/src/query/sql/src/planner/format/display_rel_operator.rs index c75cecbdadca..47a0ae1ebec4 100644 --- a/src/query/sql/src/planner/format/display_rel_operator.rs +++ b/src/query/sql/src/planner/format/display_rel_operator.rs @@ -197,6 +197,9 @@ pub fn format_exchange( Exchange::Merge => { write!(f, "Exchange(Merge)") } + Exchange::MergeSort => { + write!(f, "Exchange(MergeSort)") + } } } diff --git a/src/query/sql/src/planner/optimizer/distributed/distributed.rs b/src/query/sql/src/planner/optimizer/distributed/distributed.rs index c890cb820a98..71aad2ce6519 100644 --- a/src/query/sql/src/planner/optimizer/distributed/distributed.rs +++ b/src/query/sql/src/planner/optimizer/distributed/distributed.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use common_catalog::table_context::TableContext; use common_exception::Result; -use super::topn::TopNPushDownOptimizer; +use super::sort_and_limit::SortAndLimitPushDownOptimizer; use crate::optimizer::property::require_property; use crate::optimizer::Distribution; use crate::optimizer::RelExpr; @@ -31,8 +31,8 @@ pub fn optimize_distributed_query(ctx: Arc, s_expr: &SExpr) -> }; let result = require_property(ctx, &required, s_expr)?; - let topn_optimizer = TopNPushDownOptimizer::create(); - let mut result = topn_optimizer.optimize(&result)?; + let sort_and_limit_optimizer = SortAndLimitPushDownOptimizer::create(); + let mut result = sort_and_limit_optimizer.optimize(&result)?; let rel_expr = RelExpr::with_s_expr(&result); let physical_prop = rel_expr.derive_physical_prop()?; diff --git a/src/query/sql/src/planner/optimizer/distributed/mod.rs b/src/query/sql/src/planner/optimizer/distributed/mod.rs index 88619549edc5..e145cb03e5cf 100644 --- a/src/query/sql/src/planner/optimizer/distributed/mod.rs +++ b/src/query/sql/src/planner/optimizer/distributed/mod.rs @@ -15,7 +15,7 @@ #[allow(clippy::module_inception)] mod distributed; mod distributed_merge; -mod topn; +mod sort_and_limit; pub use distributed::optimize_distributed_query; pub use distributed_merge::MergeSourceOptimizer; diff --git a/src/query/sql/src/planner/optimizer/distributed/topn.rs b/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs similarity index 74% rename from src/query/sql/src/planner/optimizer/distributed/topn.rs rename to src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs index 973f329115f9..85181d806c77 100644 --- a/src/query/sql/src/planner/optimizer/distributed/topn.rs +++ b/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs @@ -17,36 +17,36 @@ use std::sync::Arc; use common_exception::Result; use crate::optimizer::SExpr; +use crate::plans::Exchange; use 
crate::plans::Limit; use crate::plans::PatternPlan; use crate::plans::RelOp; +use crate::plans::RelOperator; use crate::plans::Sort; -pub(super) struct TopNPushDownOptimizer { - topn_pattern: SExpr, +pub(super) struct SortAndLimitPushDownOptimizer { + sort_pattern: SExpr, limit_pattern: SExpr, } -impl TopNPushDownOptimizer { +impl SortAndLimitPushDownOptimizer { pub fn create() -> Self { Self { - topn_pattern: Self::topn_pattern(), + sort_pattern: Self::sort_pattern(), limit_pattern: Self::limit_pattern(), } } - fn topn_pattern() -> SExpr { + /// `limit` is already pushed down to `Sort`, + /// so the TopN scenario is already contained in this pattern. + fn sort_pattern() -> SExpr { // Input: - // Limit - // \ // Sort // \ // Exchange // \ // * // Output: - // Limit - // \ // Sort (after_exchange = true) // \ // Exchange @@ -57,31 +57,23 @@ impl TopNPushDownOptimizer { SExpr::create_unary( Arc::new( PatternPlan { - plan_type: RelOp::Limit, + plan_type: RelOp::Sort, } .into(), ), Arc::new(SExpr::create_unary( Arc::new( PatternPlan { - plan_type: RelOp::Sort, + plan_type: RelOp::Exchange, } .into(), ), - Arc::new(SExpr::create_unary( - Arc::new( - PatternPlan { - plan_type: RelOp::Exchange, - } - .into(), - ), - Arc::new(SExpr::create_leaf(Arc::new( - PatternPlan { - plan_type: RelOp::Pattern, - } - .into(), - ))), - )), + Arc::new(SExpr::create_leaf(Arc::new( + PatternPlan { + plan_type: RelOp::Pattern, + } + .into(), + ))), )), ) } @@ -132,34 +124,31 @@ impl TopNPushDownOptimizer { replaced_children.push(Arc::new(new_child)); } let new_sexpr = s_expr.replace_children(replaced_children); - let apply_topn_res = self.apply_topn(&new_sexpr)?; + let apply_topn_res = self.apply_sort(&new_sexpr)?; self.apply_limit(&apply_topn_res) } - fn apply_topn(&self, s_expr: &SExpr) -> Result { - if !s_expr.match_pattern(&self.topn_pattern) { - return Ok(s_expr.clone()); - } - - let sort_sexpr = s_expr.child(0)?; - let exchange_sexpr = sort_sexpr.child(0)?; - - let mut sort: Sort = sort_sexpr.plan().clone().try_into()?; - - if sort.limit.is_none() { - // It could be a ORDER BY ... OFFSET ... clause. 
(No LIMIT) + fn apply_sort(&self, s_expr: &SExpr) -> Result { + if !s_expr.match_pattern(&self.sort_pattern) { return Ok(s_expr.clone()); } + let mut sort: Sort = s_expr.plan().clone().try_into()?; + sort.after_exchange = Some(false); + let exchange_sexpr = s_expr.child(0)?; + debug_assert!(matches!( + exchange_sexpr.plan.as_ref(), + RelOperator::Exchange(Exchange::Merge) + )); debug_assert!(exchange_sexpr.children.len() == 1); + let exchange_sexpr = exchange_sexpr.replace_plan(Arc::new(Exchange::MergeSort.into())); let child = exchange_sexpr.child(0)?.clone(); let before_exchange_sort = SExpr::create_unary(Arc::new(sort.clone().into()), Arc::new(child)); let new_exchange = exchange_sexpr.replace_children(vec![Arc::new(before_exchange_sort)]); - sort.after_exchange = true; - let new_sort = SExpr::create_unary(Arc::new(sort.into()), Arc::new(new_exchange)); - let new_plan = s_expr.replace_children(vec![Arc::new(new_sort)]); + sort.after_exchange = Some(true); + let new_plan = SExpr::create_unary(Arc::new(sort.into()), Arc::new(new_exchange)); Ok(new_plan) } diff --git a/src/query/sql/src/planner/plans/exchange.rs b/src/query/sql/src/planner/plans/exchange.rs index 80823cd5a95f..59a81cc261bc 100644 --- a/src/query/sql/src/planner/plans/exchange.rs +++ b/src/query/sql/src/planner/plans/exchange.rs @@ -33,6 +33,7 @@ pub enum Exchange { Hash(Vec), Broadcast, Merge, + MergeSort, // For distributed sort } impl Operator for Exchange { @@ -51,6 +52,7 @@ impl Operator for Exchange { Exchange::Hash(hash_keys) => Distribution::Hash(hash_keys.clone()), Exchange::Broadcast => Distribution::Broadcast, Exchange::Merge => Distribution::Serial, + Exchange::MergeSort => Distribution::Serial, }, }) } diff --git a/src/query/sql/src/planner/plans/sort.rs b/src/query/sql/src/planner/plans/sort.rs index e19e29c1c0c4..f37a5536b298 100644 --- a/src/query/sql/src/planner/plans/sort.rs +++ b/src/query/sql/src/planner/plans/sort.rs @@ -33,7 +33,8 @@ pub struct Sort { pub limit: Option, /// If the sort plan is after the exchange plan. - pub after_exchange: bool, + /// It's [None] if the sorting plan is in single node mode. + pub after_exchange: Option, /// The columns needed by the plan after the sort plan. /// It's used to build a projection operation before building the sort operator. 
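To make the new `after_exchange: Option<bool>` flag easier to follow, the sketch below condenses the three branches of `build_sort_pipeline` from this patch into a standalone function. It is only an illustration of the intended behavior (the function name and the returned strings are made up; the real builder constructs pipeline transforms rather than returning text).

// Illustrative condensation of the match in builder_sort.rs; not the real API.
fn sort_pipeline_shape(after_exchange: Option<bool>) -> &'static str {
    match after_exchange {
        // Coordinator node: the inputs are already sorted on each cluster node
        // and carry the generated order column, so only a multi-way merge is
        // needed (a plain merge sort if there is a single input), and the
        // order column is removed at the end.
        Some(true) => "multi-sort-merge only; remove _order_col at last",
        // Cluster node: run the full sort locally and keep the order column in
        // the output so the coordinator can merge without re-converting rows.
        Some(false) => "full sort; keep _order_col in the output schema",
        // Single-node mode: full sort; no order column leaves the pipeline.
        None => "full sort; remove _order_col at last",
    }
}

fn main() {
    for v in [Some(true), Some(false), None] {
        println!("{:?} => {}", v, sort_pipeline_shape(v));
    }
}
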
diff --git a/src/query/storages/fuse/src/operations/merge_into/mutator/update_by_expr_mutator.rs b/src/query/storages/fuse/src/operations/merge_into/mutator/update_by_expr_mutator.rs index 57392fe22789..4e067ae5d133 100644 --- a/src/query/storages/fuse/src/operations/merge_into/mutator/update_by_expr_mutator.rs +++ b/src/query/storages/fuse/src/operations/merge_into/mutator/update_by_expr_mutator.rs @@ -99,7 +99,7 @@ impl UpdateByExprMutator { let filter_entry = data_block.get_by_offset(data_block.num_columns() - 1); let old_filter: Value = filter_entry.value.try_downcast().unwrap(); // pop filter - data_block = data_block.pop_columns(1)?; + data_block.pop_columns(1); // has pop old filter let origin_block = data_block.clone(); // add filter diff --git a/src/query/storages/fuse/src/operations/mutation/mutation_source.rs b/src/query/storages/fuse/src/operations/mutation/mutation_source.rs index 3fe11e9de592..599d24d9a6f4 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutation_source.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutation_source.rs @@ -230,7 +230,7 @@ impl Processor for MutationSource { if affect_rows != 0 { // Pop the row_id column if self.query_row_id_col { - data_block = data_block.pop_columns(1)?; + data_block.pop_columns(1); } let progress_values = ProgressValues { diff --git a/src/query/storages/fuse/src/statistics/cluster_statistics.rs b/src/query/storages/fuse/src/statistics/cluster_statistics.rs index 24bd559a6425..4986d8c8d2f5 100644 --- a/src/query/storages/fuse/src/statistics/cluster_statistics.rs +++ b/src/query/storages/fuse/src/statistics/cluster_statistics.rs @@ -83,11 +83,11 @@ impl ClusterStatsGenerator { // The input block contains the cluster key block. pub fn gen_stats_for_append( &self, - data_block: DataBlock, + mut data_block: DataBlock, ) -> Result<(Option, DataBlock)> { let cluster_stats = self.clusters_statistics(&data_block, self.level)?; - let block = data_block.pop_columns(self.extra_key_num)?; - Ok((cluster_stats, block)) + data_block.pop_columns(self.extra_key_num); + Ok((cluster_stats, data_block)) } // This can be used in deletion, for an existing block. 
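Before the explain-plan test updates that follow, here is a toy model of the rewrite performed by `SortAndLimitPushDownOptimizer::apply_sort` in this patch. The `Plan` enum and `apply_sort` function below are illustrative stand-ins (the real optimizer works on `SExpr`/`RelOperator`), but the shape of the rewrite matches the diff: `Sort -> Exchange(Merge) -> child` becomes `Sort(after_exchange = Some(true)) -> Exchange(MergeSort) -> Sort(after_exchange = Some(false)) -> child`, which is why the tests now show a Sort on both sides of the exchange and an `_order_col` column in the exchanged schema.

// Toy plan tree and rewrite; not the real optimizer types.
#[derive(Debug)]
enum Plan {
    Sort { after_exchange: Option<bool>, input: Box<Plan> },
    ExchangeMerge(Box<Plan>),
    ExchangeMergeSort(Box<Plan>),
    Scan,
}

// Rewrites `Sort -> ExchangeMerge -> child` into
// `Sort(Some(true)) -> ExchangeMergeSort -> Sort(Some(false)) -> child`,
// and leaves every other shape unchanged.
fn apply_sort(plan: Plan) -> Plan {
    match plan {
        Plan::Sort { after_exchange: None, input } => match *input {
            Plan::ExchangeMerge(child) => Plan::Sort {
                after_exchange: Some(true),
                input: Box::new(Plan::ExchangeMergeSort(Box::new(Plan::Sort {
                    after_exchange: Some(false),
                    input: child,
                }))),
            },
            other => Plan::Sort { after_exchange: None, input: Box::new(other) },
        },
        other => other,
    }
}

fn main() {
    let distributed = Plan::Sort {
        after_exchange: None,
        input: Box::new(Plan::ExchangeMerge(Box::new(Plan::Scan))),
    };
    println!("{:?}", apply_sort(distributed));
}
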
diff --git a/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test b/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test index c48c284d5f33..68e70946a7b4 100644 --- a/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test +++ b/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test @@ -215,10 +215,10 @@ Limit ├── sort keys: [c ASC NULLS LAST, d ASC NULLS LAST, e ASC NULLS LAST] ├── estimated rows: 0.00 └── Exchange - ├── output columns: [c (#4), d (#5), e (#6)] + ├── output columns: [c (#4), d (#5), e (#6), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [c (#4), d (#5), e (#6)] + ├── output columns: [c (#4), d (#5), e (#6), #_order_col] ├── sort keys: [c ASC NULLS LAST, d ASC NULLS LAST, e ASC NULLS LAST] ├── estimated rows: 0.00 └── EvalScalar @@ -314,10 +314,10 @@ Limit ├── sort keys: [number ASC NULLS LAST] ├── estimated rows: 10.00 └── Exchange - ├── output columns: [numbers.number (#0)] + ├── output columns: [numbers.number (#0), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [numbers.number (#0)] + ├── output columns: [numbers.number (#0), #_order_col] ├── sort keys: [number ASC NULLS LAST] ├── estimated rows: 10.00 └── TableScan @@ -343,10 +343,10 @@ Limit ├── sort keys: [number ASC NULLS LAST] ├── estimated rows: 50.00 └── Exchange - ├── output columns: [numbers.number (#0), numbers.number (#1)] + ├── output columns: [numbers.number (#0), numbers.number (#1), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [numbers.number (#0), numbers.number (#1)] + ├── output columns: [numbers.number (#0), numbers.number (#1), #_order_col] ├── sort keys: [number ASC NULLS LAST] ├── estimated rows: 50.00 └── HashJoin diff --git a/tests/sqllogictests/suites/mode/cluster/distributed_topn.test b/tests/sqllogictests/suites/mode/cluster/distributed_sort.test similarity index 59% rename from tests/sqllogictests/suites/mode/cluster/distributed_topn.test rename to tests/sqllogictests/suites/mode/cluster/distributed_sort.test index 11d45aa803a2..30887966a7bf 100644 --- a/tests/sqllogictests/suites/mode/cluster/distributed_topn.test +++ b/tests/sqllogictests/suites/mode/cluster/distributed_sort.test @@ -1,33 +1,61 @@ statement ok -drop table if exists t_distributed_topn; +drop table if exists t_distributed_sort; statement ok -create table t_distributed_topn (a int not null, b float not null, c string not null, d tuple(a int, b int) not null, e date not null) +create table t_distributed_sort (a int not null, b float not null, c string not null, d tuple(a int, b int) not null, e date not null) + +# Test full sort + +query T +explain select * from t_distributed_sort order by a desc; +---- +Sort +├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6)] +├── sort keys: [a DESC NULLS LAST] +├── estimated rows: 0.00 +└── Exchange + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6), #_order_col] + ├── exchange type: Merge + └── Sort + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6), #_order_col] + ├── sort keys: [a DESC NULLS LAST] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.default.t_distributed_sort + ├── output columns: [a (#0), b (#1), c (#2), d (#3), e (#6)] + ├── read rows: 0 + ├── 
read bytes: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 + +# Test TopN statement ok set lazy_read_threshold = 0; query T -explain select * from t_distributed_topn order by a desc limit 2 +explain select * from t_distributed_sort order by a desc limit 2 ---- Limit -├── output columns: [t_distributed_topn.a (#0), t_distributed_topn.b (#1), t_distributed_topn.c (#2), t_distributed_topn.d (#3), t_distributed_topn.e (#6)] +├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6)] ├── limit: 2 ├── offset: 0 ├── estimated rows: 0.00 └── Sort - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn.b (#1), t_distributed_topn.c (#2), t_distributed_topn.d (#3), t_distributed_topn.e (#6)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6)] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── Exchange - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn.b (#1), t_distributed_topn.c (#2), t_distributed_topn.d (#3), t_distributed_topn.e (#6)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn.b (#1), t_distributed_topn.c (#2), t_distributed_topn.d (#3), t_distributed_topn.e (#6)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6), #_order_col] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── TableScan - ├── table: default.default.t_distributed_topn + ├── table: default.default.t_distributed_sort ├── output columns: [a (#0), b (#1), c (#2), d (#3), e (#6)] ├── read rows: 0 ├── read bytes: 0 @@ -40,30 +68,30 @@ statement ok set lazy_read_threshold = 100; query T -explain select * from t_distributed_topn order by a desc limit 2 +explain select * from t_distributed_sort order by a desc limit 2 ---- RowFetch -├── output columns: [t_distributed_topn.a (#0), t_distributed_topn._row_id (#7), t_distributed_topn.b (#1), t_distributed_topn.c (#2), t_distributed_topn.d (#3), t_distributed_topn.e (#6)] +├── output columns: [t_distributed_sort.a (#0), t_distributed_sort._row_id (#7), t_distributed_sort.b (#1), t_distributed_sort.c (#2), t_distributed_sort.d (#3), t_distributed_sort.e (#6)] ├── columns to fetch: [b, c, d, e] ├── estimated rows: 0.00 └── Limit - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn._row_id (#7)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort._row_id (#7)] ├── limit: 2 ├── offset: 0 ├── estimated rows: 0.00 └── Sort - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn._row_id (#7)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort._row_id (#7)] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── Exchange - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn._row_id (#7)] + ├── output columns: [t_distributed_sort.a (#0), t_distributed_sort._row_id (#7), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [t_distributed_topn.a (#0), t_distributed_topn._row_id (#7)] + ├── output columns: [t_distributed_sort.a (#0), 
t_distributed_sort._row_id (#7), #_order_col] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── TableScan - ├── table: default.default.t_distributed_topn + ├── table: default.default.t_distributed_sort ├── output columns: [a (#0), _row_id (#7)] ├── read rows: 0 ├── read bytes: 0 diff --git a/tests/sqllogictests/suites/mode/cluster/lazy_read.test b/tests/sqllogictests/suites/mode/cluster/lazy_read.test index 117b1487d426..e62cb16865a6 100644 --- a/tests/sqllogictests/suites/mode/cluster/lazy_read.test +++ b/tests/sqllogictests/suites/mode/cluster/lazy_read.test @@ -21,10 +21,10 @@ RowFetch ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── Exchange - ├── output columns: [t_lazy.a (#0), t_lazy._row_id (#7)] + ├── output columns: [t_lazy.a (#0), t_lazy._row_id (#7), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [t_lazy.a (#0), t_lazy._row_id (#7)] + ├── output columns: [t_lazy.a (#0), t_lazy._row_id (#7), #_order_col] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── TableScan @@ -87,10 +87,10 @@ Limit ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── Exchange - ├── output columns: [t_lazy.a (#0), t_lazy.b (#1), t_lazy.c (#2), t_lazy.d (#3), t_lazy.e (#6)] + ├── output columns: [t_lazy.a (#0), t_lazy.b (#1), t_lazy.c (#2), t_lazy.d (#3), t_lazy.e (#6), #_order_col] ├── exchange type: Merge └── Sort - ├── output columns: [t_lazy.a (#0), t_lazy.b (#1), t_lazy.c (#2), t_lazy.d (#3), t_lazy.e (#6)] + ├── output columns: [t_lazy.a (#0), t_lazy.b (#1), t_lazy.c (#2), t_lazy.d (#3), t_lazy.e (#6), #_order_col] ├── sort keys: [a DESC NULLS LAST] ├── estimated rows: 0.00 └── TableScan From ed1a705e336fa92edd126212140567e6f63c206b Mon Sep 17 00:00:00 2001 From: Andy Lok Date: Tue, 5 Dec 2023 12:34:22 +0800 Subject: [PATCH 12/16] chore(ast): improve experiment dialect (#13923) * chore(ast): improve experiment dialect * fix * fix --- src/query/ast/src/error.rs | 9 +- src/query/ast/src/parser/expr.rs | 39 ++-- src/query/ast/src/parser/query.rs | 110 ++++-------- src/query/ast/src/parser/share.rs | 2 +- src/query/ast/src/parser/stage.rs | 14 +- src/query/ast/src/parser/statement.rs | 12 +- src/query/ast/src/util.rs | 7 +- src/query/ast/tests/it/parser.rs | 7 +- ...imental_expr.txt => experimental-expr.txt} | 168 ++++++++++++++++++ .../ast/tests/it/testdata/expr-error.txt | 14 ++ .../ast/tests/it/testdata/query-error.txt | 26 +++ .../ast/tests/it/testdata/statement-error.txt | 15 ++ .../suites/query/column_position.test | 2 +- 13 files changed, 323 insertions(+), 102 deletions(-) rename src/query/ast/tests/it/testdata/{experimental_expr.txt => experimental-expr.txt} (64%) diff --git a/src/query/ast/src/error.rs b/src/query/ast/src/error.rs index 7a34d666d87c..1e35906cdb42 100644 --- a/src/query/ast/src/error.rs +++ b/src/query/ast/src/error.rs @@ -186,9 +186,14 @@ pub fn display_parser_error(error: Error, source: &str) -> String { let mut labels = vec![]; // Plain text error has the highest priority. Only display it if exists. 
- for kind in &inner.errors { + for (span, kind) in error + .errors + .iter() + .map(|err| (error.span, err)) + .chain(inner.errors.iter().map(|err| (inner.span, err))) + { if let ErrorKind::Other(msg) = kind { - labels = vec![(inner.span, msg.to_string())]; + labels = vec![(span, msg.to_string())]; break; } } diff --git a/src/query/ast/src/parser/expr.rs b/src/query/ast/src/parser/expr.rs index d227aa610c03..297854129709 100644 --- a/src/query/ast/src/parser/expr.rs +++ b/src/query/ast/src/parser/expr.rs @@ -1070,7 +1070,7 @@ pub fn expr_element(i: Input) -> IResult> { // python style list comprehensions // python: [i for i in range(10) if i%2==0 ] // sql: [i for i in range(10) if i%2 = 0 ] - let list_comprehensions = check_experimental_chain_function( + let list_comprehensions = check_experimental_list_comprehension( true, map( rule! { @@ -1244,9 +1244,13 @@ pub fn column_id(i: Input) -> IResult { alt(( map_res(rule! { ColumnPosition }, |token| { let name = token.text().to_string(); - let pos = name[1..].parse::()?; + let pos = name[1..] + .parse::() + .map_err(|e| nom::Err::Failure(e.into()))?; if pos == 0 { - return Err(ErrorKind::Other("column position must be greater than 0")); + return Err(nom::Err::Failure(ErrorKind::Other( + "column position must be greater than 0", + ))); } Ok(ColumnID::Position(crate::ast::ColumnPosition { pos, @@ -1384,9 +1388,11 @@ pub fn literal_u64(i: Input) -> IResult { rule! { LiteralInteger }, - |token| Ok(u64::from_str_radix(token.text(), 10)?), + |token| u64::from_str_radix(token.text(), 10).map_err(|e| nom::Err::Failure(e.into())), ); - let hex = map_res(literal_hex_str, |lit| Ok(u64::from_str_radix(lit, 16)?)); + let hex = map_res(literal_hex_str, |lit| { + u64::from_str_radix(lit, 16).map_err(|e| nom::Err::Failure(e.into())) + }); rule!( #decimal @@ -1399,16 +1405,18 @@ pub fn literal_number(i: Input) -> IResult { rule! { LiteralInteger }, - |token| parse_uint(token.text(), 10), + |token| parse_uint(token.text(), 10).map_err(nom::Err::Failure), ); - let hex_uint = map_res(literal_hex_str, |str| parse_uint(str, 16)); + let hex_uint = map_res(literal_hex_str, |str| { + parse_uint(str, 16).map_err(nom::Err::Failure) + }); let decimal_float = map_res( rule! { LiteralFloat }, - |token| parse_float(token.text()), + |token| parse_float(token.text()).map_err(nom::Err::Failure), ); rule!( @@ -1436,11 +1444,12 @@ pub fn literal_string(i: Input) -> IResult { .is_some() { let str = &token.text()[1..token.text().len() - 1]; - let unescaped = unescape_string(str, '\'') - .ok_or(ErrorKind::Other("invalid escape or unicode"))?; + let unescaped = unescape_string(str, '\'').ok_or(nom::Err::Failure( + ErrorKind::Other("invalid escape or unicode"), + ))?; Ok(unescaped) } else { - Err(ErrorKind::ExpectToken(QuotedString)) + Err(nom::Err::Error(ErrorKind::ExpectToken(QuotedString))) } }, )(i) @@ -1452,7 +1461,7 @@ pub fn literal_string_eq_ignore_case(s: &str) -> impl FnMut(Input) -> IResult<() if token.text()[1..token.text().len() - 1].eq_ignore_ascii_case(s) { Ok(()) } else { - Err(ErrorKind::ExpectToken(QuotedString)) + Err(nom::Err::Error(ErrorKind::ExpectToken(QuotedString))) } })(i) } @@ -1510,11 +1519,11 @@ pub fn type_name(i: Input) -> IResult { Ok(TypeName::Decimal { precision: precision .try_into() - .map_err(|_| ErrorKind::Other("precision is too large"))?, + .map_err(|_| nom::Err::Failure(ErrorKind::Other("precision is too large")))?, scale: if let Some((_, scale)) = opt_scale { scale .try_into() - .map_err(|_| ErrorKind::Other("scale is too large"))? 
+ .map_err(|_| nom::Err::Failure(ErrorKind::Other("scale is too large")))? } else { 0 }, @@ -1677,7 +1686,7 @@ pub fn map_access(i: Input) -> IResult { return Ok(MapAccessor::DotNumber { key }); } } - Err(ErrorKind::ExpectText(".")) + Err(nom::Err::Error(ErrorKind::ExpectText("."))) }, ); let colon = map( diff --git a/src/query/ast/src/parser/query.rs b/src/query/ast/src/parser/query.rs index dbeb1f1b131a..725b24f0af7f 100644 --- a/src/query/ast/src/parser/query.rs +++ b/src/query/ast/src/parser/query.rs @@ -32,6 +32,7 @@ use crate::parser::statement::hint; use crate::parser::token::*; use crate::rule; use crate::util::*; +use crate::ErrorKind; pub fn query(i: Input) -> IResult { context( @@ -41,7 +42,7 @@ pub fn query(i: Input) -> IResult { } pub fn set_operation(i: Input) -> IResult { - let (rest, set_operation_elements) = rule!(#set_operation_element+)(i)?; + let (rest, set_operation_elements) = rule! { #set_operation_element+ }(i)?; let iter = &mut set_operation_elements.into_iter(); run_pratt_parser(SetOperationParser, iter, rest, i) } @@ -52,13 +53,13 @@ pub enum SetOperationElement { SelectStmt { hints: Option, distinct: bool, - select_list: Box>, - from: Box>, - selection: Box>, + select_list: Vec, + from: Vec, + selection: Option, group_by: Option, - having: Box>, + having: Option, window_list: Option>, - qualify: Box>, + qualify: Option, }, SetOperation { op: SetOperator, @@ -97,84 +98,50 @@ pub fn set_operation_element(i: Input) -> IResult> } }, ); - let select_stmt = map( + let select_stmt = map_res( rule! { - SELECT ~ #hint? ~ DISTINCT? ~ ^#comma_separated_list1(select_target) - ~ ( FROM ~ ^#comma_separated_list1(table_reference) )? - ~ ( WHERE ~ ^#expr )? - ~ ( GROUP ~ ^BY ~ ^#group_by_items )? - ~ ( HAVING ~ ^#expr )? - ~ ( WINDOW ~ ^#comma_separated_list1(window_clause) )? - ~ ( QUALIFY ~ ^#expr )? + ( FROM ~ ^#comma_separated_list1(table_reference) )? + ~ SELECT ~ #hint? ~ DISTINCT? ~ ^#comma_separated_list1(select_target) + ~ ( FROM ~ ^#comma_separated_list1(table_reference) )? + ~ ( WHERE ~ ^#expr )? + ~ ( GROUP ~ ^BY ~ ^#group_by_items )? + ~ ( HAVING ~ ^#expr )? + ~ ( WINDOW ~ ^#comma_separated_list1(window_clause) )? + ~ ( QUALIFY ~ ^#expr )? }, |( + opt_from_block_first, _select, opt_hints, opt_distinct, select_list, - opt_from_block, + opt_from_block_second, opt_where_block, opt_group_by_block, opt_having_block, opt_window_block, opt_qualify_block, )| { - SetOperationElement::SelectStmt { - hints: opt_hints, - distinct: opt_distinct.is_some(), - select_list: Box::new(select_list), - from: Box::new( - opt_from_block - .map(|(_, table_refs)| table_refs) - .unwrap_or_default(), - ), - selection: Box::new(opt_where_block.map(|(_, selection)| selection)), - group_by: opt_group_by_block.map(|(_, _, group_by)| group_by), - having: Box::new(opt_having_block.map(|(_, having)| having)), - window_list: opt_window_block.map(|(_, windows)| windows), - qualify: Box::new(opt_qualify_block.map(|(_, qualify)| qualify)), + if opt_from_block_first.is_some() && opt_from_block_second.is_some() { + return Err(nom::Err::Failure(ErrorKind::Other( + "duplicated FROM clause", + ))); } - }, - ); - // From ... Select - let select_stmt_from_first = map( - rule! { - ( FROM ~ ^#comma_separated_list1(table_reference) )? - ~ SELECT ~ #hint? ~ DISTINCT? ~ ^#comma_separated_list1(select_target) - ~ ( WHERE ~ ^#expr )? - ~ ( GROUP ~ ^BY ~ ^#group_by_items )? - ~ ( HAVING ~ ^#expr )? - ~ ( WINDOW ~ ^#comma_separated_list1(window_clause) )? - ~ ( QUALIFY ~ ^#expr )? 
- }, - |( - opt_from_block, - _select, - opt_hints, - opt_distinct, - select_list, - opt_where_block, - opt_group_by_block, - opt_having_block, - opt_window_block, - opt_qualify_block, - )| { - SetOperationElement::SelectStmt { + Ok(SetOperationElement::SelectStmt { hints: opt_hints, distinct: opt_distinct.is_some(), - select_list: Box::new(select_list), - from: Box::new( - opt_from_block - .map(|(_, table_refs)| table_refs) - .unwrap_or_default(), - ), - selection: Box::new(opt_where_block.map(|(_, selection)| selection)), + select_list, + from: opt_from_block_first + .or(opt_from_block_second) + .map(|(_, table_refs)| table_refs) + .unwrap_or_default(), + selection: opt_where_block.map(|(_, selection)| selection), group_by: opt_group_by_block.map(|(_, _, group_by)| group_by), - having: Box::new(opt_having_block.map(|(_, having)| having)), + having: opt_having_block.map(|(_, having)| having), window_list: opt_window_block.map(|(_, windows)| windows), - qualify: Box::new(opt_qualify_block.map(|(_, qualify)| qualify)), - } + qualify: opt_qualify_block.map(|(_, qualify)| qualify), + }) }, ); @@ -220,7 +187,6 @@ pub fn set_operation_element(i: Input) -> IResult> | #with | #set_operator | #select_stmt - | #select_stmt_from_first | #values | #order_by | #limit @@ -274,13 +240,13 @@ impl<'a, I: Iterator>> PrattParser span: transform_span(input.span.0), hints, distinct, - select_list: *select_list, - from: *from, - selection: *selection, + select_list, + from, + selection, group_by, - having: *having, + having, window_list, - qualify: *qualify, + qualify, })), SetOperationElement::Values(values) => SetExpr::Values { span: transform_span(input.span.0), @@ -626,7 +592,7 @@ pub fn order_by_expr(i: Input) -> IResult { } pub fn table_reference(i: Input) -> IResult { - let (rest, table_reference_elements) = rule!(#table_reference_element+)(i)?; + let (rest, table_reference_elements) = rule! 
{ #table_reference_element+ }(i)?; let iter = &mut table_reference_elements.into_iter(); run_pratt_parser(TableReferenceParser, iter, rest, i) } diff --git a/src/query/ast/src/parser/share.rs b/src/query/ast/src/parser/share.rs index 01aa1506f2fe..fa9348af1945 100644 --- a/src/query/ast/src/parser/share.rs +++ b/src/query/ast/src/parser/share.rs @@ -28,7 +28,7 @@ pub fn share_endpoint_uri_location(i: Input) -> IResult { }, |location| { UriLocation::from_uri(location, "".to_string(), BTreeMap::new()) - .map_err(|_| ErrorKind::Other("invalid uri")) + .map_err(|_| nom::Err::Failure(ErrorKind::Other("invalid uri"))) }, )(i) } diff --git a/src/query/ast/src/parser/stage.rs b/src/query/ast/src/parser/stage.rs index 7b295f83fd39..a84efa385816 100644 --- a/src/query/ast/src/parser/stage.rs +++ b/src/query/ast/src/parser/stage.rs @@ -165,13 +165,17 @@ pub fn file_location(i: Input) -> IResult { pub fn stage_location(i: Input) -> IResult { map_res(file_location, |location| match location { FileLocation::Stage(s) => Ok(s), - FileLocation::Uri(_) => Err(ErrorKind::Other("expect stage location, got uri location")), + FileLocation::Uri(_) => Err(nom::Err::Failure(ErrorKind::Other( + "expect stage location, got uri location", + ))), })(i) } pub fn uri_location(i: Input) -> IResult { map_res(string_location, |location| match location { - FileLocation::Stage(_) => Err(ErrorKind::Other("uri location should not start with '@'")), + FileLocation::Stage(_) => Err(nom::Err::Failure(ErrorKind::Other( + "uri location should not start with '@'", + ))), FileLocation::Uri(u) => Ok(u), })(i) } @@ -192,7 +196,9 @@ pub fn string_location(i: Input) -> IResult { { Ok(FileLocation::Stage(stripped.to_string())) } else { - Err(ErrorKind::Other("uri location should not start with '@'")) + Err(nom::Err::Failure(ErrorKind::Other( + "uri location should not start with '@'", + ))) } } else { let part_prefix = if let Some((_, _, p, _)) = location_prefix { @@ -207,7 +213,7 @@ pub fn string_location(i: Input) -> IResult { conns.extend(credentials_opts.map(|v| v.2).unwrap_or_default()); let uri = UriLocation::from_uri(location, part_prefix, conns) - .map_err(|_| ErrorKind::Other("invalid uri"))?; + .map_err(|_| nom::Err::Failure(ErrorKind::Other("invalid uri")))?; Ok(FileLocation::Uri(uri)) } }, diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index c58c22cf2e7f..8cb842ae11eb 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -68,13 +68,17 @@ pub fn statement(i: Input) -> IResult { Ok(Statement::Explain { kind: match opt_kind.map(|token| token.kind) { Some(TokenKind::AST) => { - let formatted_stmt = format_statement(statement.stmt.clone()) - .map_err(|_| ErrorKind::Other("invalid statement"))?; + let formatted_stmt = + format_statement(statement.stmt.clone()).map_err(|_| { + nom::Err::Failure(ErrorKind::Other("invalid statement")) + })?; ExplainKind::Ast(formatted_stmt) } Some(TokenKind::SYNTAX) => { - let pretty_stmt = pretty_statement(statement.stmt.clone(), 10) - .map_err(|_| ErrorKind::Other("invalid statement"))?; + let pretty_stmt = + pretty_statement(statement.stmt.clone(), 10).map_err(|_| { + nom::Err::Failure(ErrorKind::Other("invalid statement")) + })?; ExplainKind::Syntax(pretty_stmt) } Some(TokenKind::PIPELINE) => ExplainKind::Pipeline, diff --git a/src/query/ast/src/util.rs b/src/query/ast/src/util.rs index 860ac2309422..bfabe628735d 100644 --- a/src/query/ast/src/util.rs +++ b/src/query/ast/src/util.rs @@ -318,14 +318,16 @@ pub fn 
map_res<'a, O1, O2, F, G>( ) -> impl FnMut(Input<'a>) -> IResult<'a, O2> where F: nom::Parser, O1, Error<'a>>, - G: FnMut(O1) -> Result, + G: FnMut(O1) -> Result>, { move |input: Input| { let i = input; let (input, o1) = parser.parse(input)?; match f(o1) { Ok(o2) => Ok((input, o2)), - Err(e) => Err(nom::Err::Error(Error::from_error_kind(i, e))), + Err(nom::Err::Error(e)) => Err(nom::Err::Error(Error::from_error_kind(i, e))), + Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(Error::from_error_kind(i, e))), + Err(nom::Err::Incomplete(_)) => unimplemented!(), } } } @@ -443,3 +445,4 @@ macro_rules! declare_experimental_feature { } declare_experimental_feature!(check_experimental_chain_function, "chain function"); +declare_experimental_feature!(check_experimental_list_comprehension, "list comprehension"); diff --git a/src/query/ast/tests/it/parser.rs b/src/query/ast/tests/it/parser.rs index 30bd93b17e4b..d5cc254802f2 100644 --- a/src/query/ast/tests/it/parser.rs +++ b/src/query/ast/tests/it/parser.rs @@ -623,6 +623,7 @@ fn test_statement_error() { error_on_column_count_mismatch = 1 )"#, r#"CREATE CONNECTION IF NOT EXISTS my_conn"#, + r#"select $0 from t1"#, ]; for case in cases { @@ -704,6 +705,8 @@ fn test_query_error() { let file = &mut mint.new_goldenfile("query-error.txt").unwrap(); let cases = &[ r#"select * from customer join where a = b"#, + r#"from t1 select * from t2"#, + r#"from t1 select * from t2 where a = b"#, r#"select * from join customer"#, r#"select * from customer natural inner join orders on a = b"#, r#"select * order a"#, @@ -815,7 +818,7 @@ fn test_expr() { #[test] fn test_experimental_expr() { let mut mint = Mint::new("tests/it/testdata"); - let file = &mut mint.new_goldenfile("experimental_expr.txt").unwrap(); + let file = &mut mint.new_goldenfile("experimental-expr.txt").unwrap(); let cases = &[ r#"a"#, @@ -825,6 +828,7 @@ fn test_experimental_expr() { r#"1 + {'k1': 4}.k1"#, r#"'3'.plus(4)"#, r#"(3).add({'k1': 4 }.k1)"#, + r#"[ x * 100 FOR x in [1,2,3] if x % 2 = 0 ]"#, ]; for case in cases { @@ -844,6 +848,7 @@ fn test_expr_error() { r#"1 a"#, r#"CAST(col1)"#, r#"a.add(b)"#, + r#"[ x * 100 FOR x in [1,2,3] if x % 2 = 0 ]"#, r#"G.E.B IS NOT NULL AND col1 NOT BETWEEN col2 AND AND 1 + col3 DIV sum(col4)"#, diff --git a/src/query/ast/tests/it/testdata/experimental_expr.txt b/src/query/ast/tests/it/testdata/experimental-expr.txt similarity index 64% rename from src/query/ast/tests/it/testdata/experimental_expr.txt rename to src/query/ast/tests/it/testdata/experimental-expr.txt index 49a8b15a0b24..7583dee4a69e 100644 --- a/src/query/ast/tests/it/testdata/experimental_expr.txt +++ b/src/query/ast/tests/it/testdata/experimental-expr.txt @@ -423,3 +423,171 @@ FunctionCall { } +---------- Input ---------- +[ x * 100 FOR x in [1,2,3] if x % 2 = 0 ] +---------- Output --------- +array_map(array_filter([1, 2, 3], x -> ((x % 2) = 0)), x -> (x * 100)) +---------- AST ------------ +FunctionCall { + span: Some( + 0..41, + ), + distinct: false, + name: Identifier { + name: "array_map", + quote: None, + span: None, + }, + args: [ + FunctionCall { + span: Some( + 0..41, + ), + distinct: false, + name: Identifier { + name: "array_filter", + quote: None, + span: None, + }, + args: [ + Array { + span: Some( + 19..26, + ), + exprs: [ + Literal { + span: Some( + 20..21, + ), + lit: UInt64( + 1, + ), + }, + Literal { + span: Some( + 22..23, + ), + lit: UInt64( + 2, + ), + }, + Literal { + span: Some( + 24..25, + ), + lit: UInt64( + 3, + ), + }, + ], + }, + ], + params: [], + window: None, + 
lambda: Some( + Lambda { + params: [ + Identifier { + name: "x", + quote: None, + span: Some( + 14..15, + ), + }, + ], + expr: BinaryOp { + span: Some( + 36..37, + ), + op: Eq, + left: BinaryOp { + span: Some( + 32..33, + ), + op: Modulo, + left: ColumnRef { + span: Some( + 30..31, + ), + database: None, + table: None, + column: Name( + Identifier { + name: "x", + quote: None, + span: Some( + 30..31, + ), + }, + ), + }, + right: Literal { + span: Some( + 34..35, + ), + lit: UInt64( + 2, + ), + }, + }, + right: Literal { + span: Some( + 38..39, + ), + lit: UInt64( + 0, + ), + }, + }, + }, + ), + }, + ], + params: [], + window: None, + lambda: Some( + Lambda { + params: [ + Identifier { + name: "x", + quote: None, + span: Some( + 14..15, + ), + }, + ], + expr: BinaryOp { + span: Some( + 4..5, + ), + op: Multiply, + left: ColumnRef { + span: Some( + 2..3, + ), + database: None, + table: None, + column: Name( + Identifier { + name: "x", + quote: None, + span: Some( + 2..3, + ), + }, + ), + }, + right: Literal { + span: Some( + 6..9, + ), + lit: UInt64( + 100, + ), + }, + }, + }, + ), +} + + diff --git a/src/query/ast/tests/it/testdata/expr-error.txt b/src/query/ast/tests/it/testdata/expr-error.txt index 0d40357b074e..622e361d3d24 100644 --- a/src/query/ast/tests/it/testdata/expr-error.txt +++ b/src/query/ast/tests/it/testdata/expr-error.txt @@ -73,6 +73,20 @@ error: | while parsing expression +---------- Input ---------- +[ x * 100 FOR x in [1,2,3] if x % 2 = 0 ] +---------- Output --------- +error: + --> SQL:1:1 + | +1 | [ x * 100 FOR x in [1,2,3] if x % 2 = 0 ] + | ^ + | | + | list comprehension only works in experimental dialect, try `set sql_dialect = experimental` + | while parsing expression + | while parsing [expr for x in ... [if ...]] + + ---------- Input ---------- G.E.B IS NOT NULL AND col1 NOT BETWEEN col2 AND diff --git a/src/query/ast/tests/it/testdata/query-error.txt b/src/query/ast/tests/it/testdata/query-error.txt index bd09b7c29256..a952f70c4876 100644 --- a/src/query/ast/tests/it/testdata/query-error.txt +++ b/src/query/ast/tests/it/testdata/query-error.txt @@ -10,6 +10,32 @@ error: | while parsing `SELECT ...` +---------- Input ---------- +from t1 select * from t2 +---------- Output --------- +error: + --> SQL:1:1 + | +1 | from t1 select * from t2 + | ^^^^ + | | + | duplicated FROM clause + | while parsing `SELECT ...` + + +---------- Input ---------- +from t1 select * from t2 where a = b +---------- Output --------- +error: + --> SQL:1:1 + | +1 | from t1 select * from t2 where a = b + | ^^^^ + | | + | duplicated FROM clause + | while parsing `SELECT ...` + + ---------- Input ---------- select * from join customer ---------- Output --------- diff --git a/src/query/ast/tests/it/testdata/statement-error.txt b/src/query/ast/tests/it/testdata/statement-error.txt index 78abfdf75bea..4807af73684f 100644 --- a/src/query/ast/tests/it/testdata/statement-error.txt +++ b/src/query/ast/tests/it/testdata/statement-error.txt @@ -743,3 +743,18 @@ error: | while parsing `CREATE CONNECTION [IF NOT EXISTS] STORAGE_TYPE = ` +---------- Input ---------- +select $0 from t1 +---------- Output --------- +error: + --> SQL:1:8 + | +1 | select $0 from t1 + | ------ ^^ + | | | + | | column position must be greater than 0 + | | while parsing expression + | | while parsing + | while parsing `SELECT ...` + + diff --git a/tests/sqllogictests/suites/query/column_position.test b/tests/sqllogictests/suites/query/column_position.test index fc87fc04a69c..899452715c05 100644 --- 
a/tests/sqllogictests/suites/query/column_position.test +++ b/tests/sqllogictests/suites/query/column_position.test @@ -30,7 +30,7 @@ select * from t1 where $1 = 1; ---- 1 a -statement error 1065 +statement error 1005 select $0 from t1 statement error 1065 From 8323496fbdab0302caa7157e81cb12a6d6ba4a38 Mon Sep 17 00:00:00 2001 From: Jk Xu <54522439+Dousir9@users.noreply.github.com> Date: Tue, 5 Dec 2023 13:20:47 +0800 Subject: [PATCH 13/16] feat(query): add TransformFilter (#13922) * add TransformFilter * make lint * Reuse BlockingTransform * remove unused code * remove unused code * fix --- .../src/pipelines/builders/builder_filter.rs | 26 ++--- .../pipelines/processors/transforms/mod.rs | 2 + .../processors/transforms/transform_filter.rs | 95 +++++++++++++++++++ src/query/sql/src/evaluator/block_operator.rs | 25 ----- src/query/sql/src/evaluator/cse.rs | 1 - 5 files changed, 106 insertions(+), 43 deletions(-) create mode 100644 src/query/service/src/pipelines/processors/transforms/transform_filter.rs diff --git a/src/query/service/src/pipelines/builders/builder_filter.rs b/src/query/service/src/pipelines/builders/builder_filter.rs index cec93fff22c6..49193ca1a04f 100644 --- a/src/query/service/src/pipelines/builders/builder_filter.rs +++ b/src/query/service/src/pipelines/builders/builder_filter.rs @@ -17,12 +17,10 @@ use common_exception::Result; use common_expression::type_check::check_function; use common_functions::BUILTIN_FUNCTIONS; use common_pipeline_core::processors::ProcessorPtr; -use common_pipeline_transforms::processors::TransformProfileWrapper; -use common_pipeline_transforms::processors::Transformer; -use common_sql::evaluator::BlockOperator; -use common_sql::evaluator::CompoundBlockOperator; +use common_pipeline_transforms::processors::ProcessorProfileWrapper; use common_sql::executor::physical_plans::Filter; +use crate::pipelines::processors::transforms::TransformFilter; use crate::pipelines::PipelineBuilder; impl PipelineBuilder { @@ -43,29 +41,23 @@ impl PipelineBuilder { )) })?; - let num_input_columns = filter.input.output_schema()?.num_fields(); self.main_pipeline.add_transform(|input, output| { - let transform = CompoundBlockOperator::new( - vec![BlockOperator::Filter { - projections: filter.projections.clone(), - expr: predicate.clone(), - }], + let transform = TransformFilter::create( + input, + output, + predicate.clone(), + filter.projections.clone(), self.func_ctx.clone(), - num_input_columns, ); if self.enable_profiling { - Ok(ProcessorPtr::create(TransformProfileWrapper::create( + Ok(ProcessorPtr::create(ProcessorProfileWrapper::create( transform, - input, - output, filter.plan_id, self.proc_profs.clone(), ))) } else { - Ok(ProcessorPtr::create(Transformer::create( - input, output, transform, - ))) + Ok(ProcessorPtr::create(transform)) } })?; diff --git a/src/query/service/src/pipelines/processors/transforms/mod.rs b/src/query/service/src/pipelines/processors/transforms/mod.rs index 4e328cacf150..91423a8c8b6c 100644 --- a/src/query/service/src/pipelines/processors/transforms/mod.rs +++ b/src/query/service/src/pipelines/processors/transforms/mod.rs @@ -24,6 +24,7 @@ mod transform_add_const_columns; mod transform_add_stream_columns; mod transform_cast_schema; mod transform_create_sets; +mod transform_filter; mod transform_limit; mod transform_materialized_cte; mod transform_merge_block; @@ -45,6 +46,7 @@ pub use transform_add_stream_columns::TransformAddStreamColumns; pub use transform_cast_schema::TransformCastSchema; pub use 
transform_create_sets::SubqueryReceiver; pub use transform_create_sets::TransformCreateSets; +pub use transform_filter::TransformFilter; pub use transform_limit::TransformLimit; pub use transform_materialized_cte::MaterializedCteSink; pub use transform_materialized_cte::MaterializedCteSource; diff --git a/src/query/service/src/pipelines/processors/transforms/transform_filter.rs b/src/query/service/src/pipelines/processors/transforms/transform_filter.rs new file mode 100644 index 000000000000..167fb84fa198 --- /dev/null +++ b/src/query/service/src/pipelines/processors/transforms/transform_filter.rs @@ -0,0 +1,95 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use common_catalog::plan::AggIndexMeta; +use common_exception::Result; +use common_expression::types::BooleanType; +use common_expression::BlockMetaInfoDowncast; +use common_expression::DataBlock; +use common_expression::Evaluator; +use common_expression::Expr; +use common_expression::FunctionContext; +use common_functions::BUILTIN_FUNCTIONS; +use common_pipeline_transforms::processors::BlockingTransform; +use common_pipeline_transforms::processors::BlockingTransformer; +use common_sql::optimizer::ColumnSet; + +use crate::pipelines::processors::InputPort; +use crate::pipelines::processors::OutputPort; +use crate::pipelines::processors::Processor; + +/// Filter the input [`DataBlock`] with the predicate `expr`. +pub struct TransformFilter { + expr: Expr, + projections: ColumnSet, + func_ctx: FunctionContext, + output_data: Option, +} + +impl TransformFilter { + pub fn create( + input: Arc, + output: Arc, + expr: Expr, + projections: ColumnSet, + func_ctx: FunctionContext, + ) -> Box { + BlockingTransformer::create(input, output, TransformFilter { + expr, + projections, + func_ctx, + output_data: None, + }) + } +} + +impl BlockingTransform for TransformFilter { + const NAME: &'static str = "TransformFilter"; + + fn consume(&mut self, input: DataBlock) -> Result<()> { + let num_evals = input + .get_meta() + .and_then(AggIndexMeta::downcast_ref_from) + .map(|a| a.num_evals); + + let data_block = if let Some(num_evals) = num_evals { + // It's from aggregating index. + input.project_with_agg_index(&self.projections, num_evals) + } else { + let evaluator = Evaluator::new(&input, &self.func_ctx, &BUILTIN_FUNCTIONS); + let filter = evaluator + .run(&self.expr)? + .try_downcast::() + .unwrap(); + let data_block = input.project(&self.projections); + data_block.filter_boolean_value(&filter)? 
+ }; + + if data_block.num_rows() > 0 { + self.output_data = Some(data_block) + } + + Ok(()) + } + + fn transform(&mut self) -> Result> { + if self.output_data.is_none() { + return Ok(None); + } + let data_block = self.output_data.take().unwrap(); + Ok(Some(data_block)) + } +} diff --git a/src/query/sql/src/evaluator/block_operator.rs b/src/query/sql/src/evaluator/block_operator.rs index fbab079fd4f8..15ba9dbc5797 100644 --- a/src/query/sql/src/evaluator/block_operator.rs +++ b/src/query/sql/src/evaluator/block_operator.rs @@ -16,8 +16,6 @@ use std::sync::Arc; use common_catalog::plan::AggIndexMeta; use common_exception::Result; -use common_expression::types::BooleanType; -use common_expression::types::DataType; use common_expression::BlockEntry; use common_expression::BlockMetaInfoDowncast; use common_expression::DataBlock; @@ -44,9 +42,6 @@ pub enum BlockOperator { projections: Option, }, - /// Filter the input [`DataBlock`] with the predicate `eval`. - Filter { projections: ColumnSet, expr: Expr }, - /// Reorganize the input [`DataBlock`] with `projection`. Project { projection: Vec }, } @@ -85,25 +80,6 @@ impl BlockOperator { } } - BlockOperator::Filter { projections, expr } => { - assert_eq!(expr.data_type(), &DataType::Boolean); - - let num_evals = input - .get_meta() - .and_then(AggIndexMeta::downcast_ref_from) - .map(|a| a.num_evals); - - if let Some(num_evals) = num_evals { - // It's from aggregating index. - Ok(input.project_with_agg_index(projections, num_evals)) - } else { - let evaluator = Evaluator::new(&input, func_ctx, &BUILTIN_FUNCTIONS); - let filter = evaluator.run(expr)?.try_downcast::().unwrap(); - let data_block = input.project(projections); - data_block.filter_boolean_value(&filter) - } - } - BlockOperator::Project { projection } => { let mut result = DataBlock::new(vec![], input.num_rows()); for index in projection { @@ -193,7 +169,6 @@ impl Transform for CompoundBlockOperator { .map(|op| { match op { BlockOperator::Map { .. } => "Map", - BlockOperator::Filter { .. } => "Filter", BlockOperator::Project { .. 
} => "Project", } .to_string() diff --git a/src/query/sql/src/evaluator/cse.rs b/src/query/sql/src/evaluator/cse.rs index 073f1f54911b..388424e3b2a1 100644 --- a/src/query/sql/src/evaluator/cse.rs +++ b/src/query/sql/src/evaluator/cse.rs @@ -113,7 +113,6 @@ pub fn apply_cse( input_num_columns = projection.len(); results.push(BlockOperator::Project { projection }); } - _ => results.push(op), } } From 35f129c23326477094a18d6da888651535e75c66 Mon Sep 17 00:00:00 2001 From: sundyli <543950155@qq.com> Date: Tue, 5 Dec 2023 05:55:31 -0800 Subject: [PATCH 14/16] chore(query): remove extra error code construction (#13926) * chore(query): remove extra error code construction * chore(query): remove extra error code construction * chore(query): remove extra error code construction * chore(query): add clippy --- clippy.toml | 16 ++++++++ .../arrow/array/dictionary/typed_iterator.rs | 6 +-- .../arrow/src/native/compression/basic.rs | 2 +- src/common/base/src/base/take_mut.rs | 4 +- src/common/base/src/lib.rs | 1 + src/common/base/src/runtime/catch_unwind.rs | 1 + src/common/storage/src/stage.rs | 8 +++- src/meta/api/src/schema_api_impl.rs | 37 +++++++++++-------- src/meta/app/src/principal/file_format.rs | 9 ++--- .../src/schema_from_to_protobuf_impl.rs | 6 +-- .../src/table_from_to_protobuf_impl.rs | 2 +- .../src/meta_service/raft_service_impl.rs | 6 +-- src/query/ast/src/ast/statements/copy.rs | 2 +- src/query/catalog/src/plan/internal_column.rs | 6 +-- src/query/catalog/src/plan/stream_column.rs | 6 +-- src/query/ee/src/stream/handler.rs | 6 +-- src/query/expression/src/type_check.rs | 10 ++--- src/query/expression/src/types/number.rs | 5 +-- src/query/expression/src/utils/date_helper.rs | 20 +++++----- .../transforms/transform_sort_merge.rs | 4 +- .../api/rpc/exchange/exchange_sink_writer.rs | 6 +-- src/query/service/src/auth.rs | 2 +- .../src/interpreters/interpreter_delete.rs | 15 ++++---- .../interpreters/interpreter_merge_into.rs | 16 ++++---- .../interpreter_merge_into_static_filter.rs | 16 ++++---- .../src/interpreters/interpreter_replace.rs | 16 ++++---- .../interpreter_table_modify_column.rs | 4 +- .../src/interpreters/interpreter_update.rs | 15 ++++---- .../transform_add_stream_columns.rs | 2 +- .../fragments/query_fragment_actions.rs | 8 ++-- .../flight_sql/flight_sql_service/session.rs | 2 +- .../infer_schema/table_args.rs | 5 +-- .../table_functions/list_stage/table_args.rs | 2 +- .../table_functions/numbers/numbers_part.rs | 4 +- src/query/service/tests/it/sql/exec/mod.rs | 24 +++++++----- .../physical_plans/physical_eval_scalar.rs | 2 +- .../physical_plans/physical_filter.rs | 2 +- .../physical_plans/physical_hash_join.rs | 10 ++--- .../physical_plans/physical_project_set.rs | 2 +- src/query/sql/src/planner/binder/table.rs | 2 +- .../storages/common/locks/src/lock_manager.rs | 2 +- .../storages/common/pruner/src/block_meta.rs | 6 +-- src/query/storages/fuse/src/fuse_part.rs | 4 +- .../read/block/block_reader_merge_io_async.rs | 2 +- .../read/block/block_reader_merge_io_sync.rs | 2 +- .../storages/fuse/src/io/write/meta_writer.rs | 2 +- .../common/processors/sink_commit.rs | 4 +- .../processors/transform_serialize_block.rs | 4 +- .../processors/transform_serialize_segment.rs | 4 +- .../operations/common/snapshot_generator.rs | 4 +- .../mutation/compact/compact_part.rs | 4 +- .../src/operations/mutation/mutation_part.rs | 4 +- .../storages/fuse/src/operations/navigate.rs | 6 +-- .../clustering_information.rs | 2 +- .../hive/hive/src/hive_block_filter.rs | 7 ++-- 
.../storages/hive/hive/src/hive_partition.rs | 4 +- .../storages/hive/hive/src/hive_table.rs | 11 ++---- src/query/storages/iceberg/src/partition.rs | 4 +- .../storages/parquet/src/parquet_part.rs | 4 +- .../src/parquet_rs/parquet_reader/utils.rs | 8 ++-- src/query/storages/random/src/random_parts.rs | 4 +- src/query/storages/system/src/log_queue.rs | 2 +- src/query/users/src/jwt/jwk.rs | 10 ++--- 63 files changed, 208 insertions(+), 208 deletions(-) create mode 100644 clippy.toml diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 000000000000..d4bed8fb3f98 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,16 @@ +disallowed-methods = [ + { path = "std::panic::catch_unwind", reason = "Please use `common_base::runtime::catch_unwind` instead." }, + { path = "futures::FutureExt::catch_unwind", reason = "Please use `common_base::runtime::CatchUnwindFuture` instead." }, + { path = "num_traits::sign::Signed::is_positive", reason = "This returns true for 0.0 but false for 0." }, + { path = "num_traits::sign::Signed::is_negative", reason = "This returns true for -0.0 but false for 0." }, + { path = "num_traits::sign::Signed::signum", reason = "This returns 1.0 for 0.0 but 0 for 0." } +] + +## TODO: enable it in next pr +# disallowed-macros = [ +# { path = "lazy_static::lazy_static", reason = "Please use `std::sync::LazyLock` instead." }, +# ] + +avoid-breaking-exported-api = true +too-many-arguments-threshold = 10 +upper-case-acronyms-aggressive = false \ No newline at end of file diff --git a/src/common/arrow/src/arrow/array/dictionary/typed_iterator.rs b/src/common/arrow/src/arrow/array/dictionary/typed_iterator.rs index 31cc22a5232c..9649053ff84c 100644 --- a/src/common/arrow/src/arrow/array/dictionary/typed_iterator.rs +++ b/src/common/arrow/src/arrow/array/dictionary/typed_iterator.rs @@ -47,9 +47,9 @@ impl DictValue for Utf8Array { array .as_any() .downcast_ref::() - .ok_or(Error::InvalidArgumentError( - "could not convert array to dictionary value".into(), - )) + .ok_or_else(|| { + Error::InvalidArgumentError("could not convert array to dictionary value".into()) + }) .map(|arr| { assert_eq!( arr.null_count(), diff --git a/src/common/arrow/src/native/compression/basic.rs b/src/common/arrow/src/native/compression/basic.rs index 6759786f68e9..77e6af20cdc4 100644 --- a/src/common/arrow/src/native/compression/basic.rs +++ b/src/common/arrow/src/native/compression/basic.rs @@ -47,7 +47,7 @@ impl TryFrom<&Compression> for CommonCompression { } impl CommonCompression { - pub fn to_compression(&self) -> Compression { + pub fn to_compression(self) -> Compression { match self { Self::None => Compression::None, Self::Lz4 => Compression::Lz4, diff --git a/src/common/base/src/base/take_mut.rs b/src/common/base/src/base/take_mut.rs index 6023e7f4e2a3..a75eba1b4d2d 100644 --- a/src/common/base/src/base/take_mut.rs +++ b/src/common/base/src/base/take_mut.rs @@ -16,6 +16,8 @@ use std::panic; use common_exception::Result; +use crate::runtime::catch_unwind; + /// copy from https://docs.rs/take_mut/0.2.2/take_mut/fn.take.html with some modifications. /// if a panic occurs, the entire process will be aborted, as there's no valid `T` to put back into the `&mut T`. 
pub fn take_mut(mut_ref: &mut T, closure: F) -> Result<()> @@ -24,7 +26,7 @@ where F: FnOnce(T) -> Result { unsafe { let old_t = ptr::read(mut_ref); - let closure_result = panic::catch_unwind(panic::AssertUnwindSafe(|| closure(old_t))); + let closure_result = catch_unwind(panic::AssertUnwindSafe(|| closure(old_t))); match closure_result { Ok(Ok(new_t)) => { diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs index b0d96788814f..cabdbf58a562 100644 --- a/src/common/base/src/lib.rs +++ b/src/common/base/src/lib.rs @@ -23,6 +23,7 @@ #![feature(backtrace_frames)] #![feature(alloc_error_hook)] #![feature(slice_swap_unchecked)] +#![feature(lint_reasons)] pub mod base; pub mod containers; diff --git a/src/common/base/src/runtime/catch_unwind.rs b/src/common/base/src/runtime/catch_unwind.rs index 13c0605f9a87..067030416f0a 100644 --- a/src/common/base/src/runtime/catch_unwind.rs +++ b/src/common/base/src/runtime/catch_unwind.rs @@ -23,6 +23,7 @@ use futures::future::BoxFuture; use futures::FutureExt; pub fn catch_unwind R, R>(f: F) -> Result { + #[expect(clippy::disallowed_methods)] match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) { Ok(res) => Ok(res), Err(cause) => match cause.downcast_ref::<&'static str>() { diff --git a/src/common/storage/src/stage.rs b/src/common/storage/src/stage.rs index 74695c671a44..ccd67fe67d01 100644 --- a/src/common/storage/src/stage.rs +++ b/src/common/storage/src/stage.rs @@ -151,12 +151,16 @@ impl StageFilesInfo { #[async_backtrace::framed] pub async fn first_file(&self, operator: &Operator) -> Result { let mut files = self.list(operator, true, None).await?; - files.pop().ok_or(ErrorCode::BadArguments("no file found")) + files + .pop() + .ok_or_else(|| ErrorCode::BadArguments("no file found")) } pub fn blocking_first_file(&self, operator: &Operator) -> Result { let mut files = self.blocking_list(operator, true, None)?; - files.pop().ok_or(ErrorCode::BadArguments("no file found")) + files + .pop() + .ok_or_else(|| ErrorCode::BadArguments("no file found")) } pub fn blocking_list( diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 39f4fd343dab..d798ffa037f1 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -552,18 +552,19 @@ impl + ?Sized> SchemaApi for KV { UndropDbHasNoHistory::new(&name_key.db_name), ))); } else { - db_id_list_opt.ok_or(KVAppError::AppError(AppError::UndropDbHasNoHistory( - UndropDbHasNoHistory::new(&name_key.db_name), - )))? + db_id_list_opt.ok_or_else(|| { + KVAppError::AppError(AppError::UndropDbHasNoHistory(UndropDbHasNoHistory::new( + &name_key.db_name, + ))) + })? }; // Return error if there is no db id history. - let db_id = - *db_id_list - .last() - .ok_or(KVAppError::AppError(AppError::UndropDbHasNoHistory( - UndropDbHasNoHistory::new(&name_key.db_name), - )))?; + let db_id = *db_id_list.last().ok_or_else(|| { + KVAppError::AppError(AppError::UndropDbHasNoHistory(UndropDbHasNoHistory::new( + &name_key.db_name, + ))) + })?; // get db_meta of the last db id let dbid = DatabaseId { db_id }; @@ -1745,9 +1746,11 @@ impl + ?Sized> SchemaApi for KV { UndropTableHasNoHistory::new(&tenant_dbname_tbname.table_name), ))); } else { - tb_id_list_opt.ok_or(KVAppError::AppError(AppError::UndropTableHasNoHistory( - UndropTableHasNoHistory::new(&tenant_dbname_tbname.table_name), - )))? 
+ tb_id_list_opt.ok_or_else(|| { + KVAppError::AppError(AppError::UndropTableHasNoHistory( + UndropTableHasNoHistory::new(&tenant_dbname_tbname.table_name), + )) + })? }; // Return error if there is no table id history. @@ -2339,10 +2342,12 @@ impl + ?Sized> SchemaApi for KV { let (_, table_name_opt): (_, Option) = get_pb_value(self, &table_id_to_name).await?; - let dbid_tbname = - table_name_opt.ok_or(KVAppError::AppError(AppError::UnknownTableId( - UnknownTableId::new(table_id, "drop_table_by_id failed to find db_id"), - )))?; + let dbid_tbname = table_name_opt.ok_or_else(|| { + KVAppError::AppError(AppError::UnknownTableId(UnknownTableId::new( + table_id, + "drop_table_by_id failed to find db_id", + ))) + })?; let db_id = dbid_tbname.db_id; let tbname = dbid_tbname.table_name.clone(); diff --git a/src/meta/app/src/principal/file_format.rs b/src/meta/app/src/principal/file_format.rs index 823c962e6fdd..f6cb73cce4bc 100644 --- a/src/meta/app/src/principal/file_format.rs +++ b/src/meta/app/src/principal/file_format.rs @@ -63,13 +63,12 @@ impl FileFormatOptionsAst { fn take_type(&mut self) -> Result { let typ = match self.options.remove("type") { Some(t) => t, - None => self - .options - .remove("format") - .ok_or(ErrorCode::IllegalFileFormat(format!( + None => self.options.remove("format").ok_or_else(|| { + ErrorCode::IllegalFileFormat(format!( "Missing type in file format options: {:?}", self.options - )))?, + )) + })?, }; StageFileFormatType::from_str(&typ).map_err(ErrorCode::IllegalFileFormat) } diff --git a/src/meta/proto-conv/src/schema_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/schema_from_to_protobuf_impl.rs index 6173f2ef3660..656bac1e4b5f 100644 --- a/src/meta/proto-conv/src/schema_from_to_protobuf_impl.rs +++ b/src/meta/proto-conv/src/schema_from_to_protobuf_impl.rs @@ -116,7 +116,7 @@ impl FromToProto for ex::ComputedExpr { fn from_pb(p: pb::ComputedExpr) -> Result { reader_check_msg(p.ver, p.min_reader_ver)?; - let computed_expr = p.computed_expr.ok_or(Incompatible { + let computed_expr = p.computed_expr.ok_or_else(|| Incompatible { reason: "Invalid ComputedExpr: .computed_expr can not be None".to_string(), })?; @@ -335,7 +335,7 @@ impl FromToProto for ex::types::NumberDataType { fn from_pb(p: pb::Number) -> Result { reader_check_msg(p.ver, p.min_reader_ver)?; - let num = p.num.ok_or(Incompatible { + let num = p.num.ok_or_else(|| Incompatible { reason: "Invalid Number: .num can not be None".to_string(), })?; @@ -386,7 +386,7 @@ impl FromToProto for ex::types::DecimalDataType { fn from_pb(p: pb::Decimal) -> Result { reader_check_msg(p.ver, p.min_reader_ver)?; - let num = p.decimal.ok_or(Incompatible { + let num = p.decimal.ok_or_else(|| Incompatible { reason: "Invalid Decimal: .decimal can not be None".to_string(), })?; diff --git a/src/meta/proto-conv/src/table_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/table_from_to_protobuf_impl.rs index 9e408f3faa70..f413fe31bab6 100644 --- a/src/meta/proto-conv/src/table_from_to_protobuf_impl.rs +++ b/src/meta/proto-conv/src/table_from_to_protobuf_impl.rs @@ -175,7 +175,7 @@ impl FromToProto for mt::TableMeta { fn from_pb(p: pb::TableMeta) -> Result { reader_check_msg(p.ver, p.min_reader_ver)?; - let schema = p.schema.ok_or(Incompatible { + let schema = p.schema.ok_or_else(|| Incompatible { reason: "TableMeta.schema can not be None".to_string(), })?; diff --git a/src/meta/service/src/meta_service/raft_service_impl.rs b/src/meta/service/src/meta_service/raft_service_impl.rs index 2fff7a740f72..ccae1033df1c 100644 --- 
a/src/meta/service/src/meta_service/raft_service_impl.rs +++ b/src/meta/service/src/meta_service/raft_service_impl.rs @@ -96,9 +96,9 @@ impl RaftServiceImpl { let _g = snapshot_recv_inflight(&addr).counter_guard(); - let chunk = snapshot_req.chunk.ok_or(GrpcHelper::invalid_arg( - "SnapshotChunkRequest.chunk is None", - ))?; + let chunk = snapshot_req + .chunk + .ok_or_else(|| GrpcHelper::invalid_arg("SnapshotChunkRequest.chunk is None"))?; let (vote, snapshot_meta): (Vote, SnapshotMeta) = GrpcHelper::parse(&snapshot_req.rpc_meta)?; diff --git a/src/query/ast/src/ast/statements/copy.rs b/src/query/ast/src/ast/statements/copy.rs index 8e3ffa967658..218598b5ae38 100644 --- a/src/query/ast/src/ast/statements/copy.rs +++ b/src/query/ast/src/ast/statements/copy.rs @@ -413,7 +413,7 @@ impl UriLocation { hostname.to_string() } }) - .ok_or(common_exception::ErrorCode::BadArguments("invalid uri"))?; + .ok_or_else(|| common_exception::ErrorCode::BadArguments("invalid uri"))?; let path = if parsed.path().is_empty() { "/".to_string() diff --git a/src/query/catalog/src/plan/internal_column.rs b/src/query/catalog/src/plan/internal_column.rs index 33a562b953e3..82c8eaebd86d 100644 --- a/src/query/catalog/src/plan/internal_column.rs +++ b/src/query/catalog/src/plan/internal_column.rs @@ -109,9 +109,9 @@ impl BlockMetaInfo for InternalColumnMeta { impl InternalColumnMeta { pub fn from_meta(info: &BlockMetaInfoPtr) -> Result<&InternalColumnMeta> { - InternalColumnMeta::downcast_ref_from(info).ok_or(ErrorCode::Internal( - "Cannot downcast from BlockMetaInfo to InternalColumnMeta.", - )) + InternalColumnMeta::downcast_ref_from(info).ok_or_else(|| { + ErrorCode::Internal("Cannot downcast from BlockMetaInfo to InternalColumnMeta.") + }) } } diff --git a/src/query/catalog/src/plan/stream_column.rs b/src/query/catalog/src/plan/stream_column.rs index 43e9e6637c22..a946f42ae594 100644 --- a/src/query/catalog/src/plan/stream_column.rs +++ b/src/query/catalog/src/plan/stream_column.rs @@ -111,9 +111,9 @@ impl BlockMetaInfo for StreamColumnMeta { impl StreamColumnMeta { pub fn from_meta(info: &BlockMetaInfoPtr) -> Result<&StreamColumnMeta> { - StreamColumnMeta::downcast_ref_from(info).ok_or(ErrorCode::Internal( - "Cannot downcast from BlockMetaInfo to StreamColumnMeta.", - )) + StreamColumnMeta::downcast_ref_from(info).ok_or_else(|| { + ErrorCode::Internal("Cannot downcast from BlockMetaInfo to StreamColumnMeta.") + }) } pub fn build_origin_block_id(&self) -> Value { diff --git a/src/query/ee/src/stream/handler.rs b/src/query/ee/src/stream/handler.rs index 5a00df6150ec..4cb0cc0c20d5 100644 --- a/src/query/ee/src/stream/handler.rs +++ b/src/query/ee/src/stream/handler.rs @@ -90,13 +90,13 @@ impl StreamHandler for RealStreamHandler { let stream_opts = stream.get_table_info().options(); let stream_table_name = stream_opts .get(OPT_KEY_TABLE_NAME) - .ok_or(ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))?; + .ok_or_else(|| ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))?; let stream_database_name = stream_opts .get(OPT_KEY_DATABASE_NAME) - .ok_or(ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))?; + .ok_or_else(|| ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))?; let stream_table_id = stream_opts .get(OPT_KEY_TABLE_ID) - .ok_or(ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))? + .ok_or_else(|| ErrorCode::IllegalStream(format!("Illegal stream '{name}'")))? 
.parse::()?; if stream_table_name != &plan.table_name || stream_database_name != &plan.table_database diff --git a/src/query/expression/src/type_check.rs b/src/query/expression/src/type_check.rs index fa79c55d4eb4..1ad545dbddb7 100755 --- a/src/query/expression/src/type_check.rs +++ b/src/query/expression/src/type_check.rs @@ -446,13 +446,11 @@ pub fn try_check_function( .map(|max_generic_idx| { (0..max_generic_idx + 1) .map(|idx| { - subst - .0 - .get(&idx) - .cloned() - .ok_or(ErrorCode::from_string_no_backtrace(format!( + subst.0.get(&idx).cloned().ok_or_else(|| { + ErrorCode::from_string_no_backtrace(format!( "unable to resolve generic T{idx}" - ))) + )) + }) }) .collect::>>() }) diff --git a/src/query/expression/src/types/number.rs b/src/query/expression/src/types/number.rs index 1fd66320fa6e..728a9335f4f4 100644 --- a/src/query/expression/src/types/number.rs +++ b/src/query/expression/src/types/number.rs @@ -21,7 +21,6 @@ use enum_as_inner::EnumAsInner; use itertools::Itertools; use lexical_core::ToLexicalWithOptions; use num_traits::NumCast; -use num_traits::Signed; use ordered_float::OrderedFloat; use serde::Deserialize; use serde::Serialize; @@ -479,8 +478,8 @@ impl NumberScalar { pub fn is_positive(&self) -> bool { crate::with_integer_mapped_type!(|NUM_TYPE| match self { NumberScalar::NUM_TYPE(num) => *num > 0, - NumberScalar::Float32(num) => num.is_positive(), - NumberScalar::Float64(num) => num.is_positive(), + NumberScalar::Float32(num) => num.is_sign_positive(), + NumberScalar::Float64(num) => num.is_sign_positive(), }) } diff --git a/src/query/expression/src/utils/date_helper.rs b/src/query/expression/src/utils/date_helper.rs index 8362e4d89d36..a9227c6916ae 100644 --- a/src/query/expression/src/utils/date_helper.rs +++ b/src/query/expression/src/utils/date_helper.rs @@ -275,10 +275,8 @@ fn add_years_base(year: i32, month: u32, day: u32, delta: i64) -> Result Result { @@ -296,12 +294,14 @@ fn add_months_base(year: i32, month: u32, day: u32, delta: i64) -> Result>(); if blocks.len() == 1 { - let block = blocks.get_mut(0).ok_or(ErrorCode::Internal("It's a bug"))?; + let block = blocks + .get_mut(0) + .ok_or_else(|| ErrorCode::Internal("It's a bug"))?; if self.order_col_generated { // Need to remove order column. 
block.pop_columns(1); diff --git a/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs b/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs index 536e151cc1f0..304e858fa09e 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs @@ -75,9 +75,9 @@ impl AsyncSink for ExchangeWriterSink { None => Err(ErrorCode::Internal( "ExchangeWriterSink only recv ExchangeSerializeMeta.", )), - Some(block_meta) => ExchangeSerializeMeta::downcast_from(block_meta).ok_or( - ErrorCode::Internal("ExchangeWriterSink only recv ExchangeSerializeMeta."), - ), + Some(block_meta) => ExchangeSerializeMeta::downcast_from(block_meta).ok_or_else(|| { + ErrorCode::Internal("ExchangeWriterSink only recv ExchangeSerializeMeta.") + }), }?; let mut bytes = 0; diff --git a/src/query/service/src/auth.rs b/src/query/service/src/auth.rs index 4fd3a4cb1680..c961af3b2eab 100644 --- a/src/query/service/src/auth.rs +++ b/src/query/service/src/auth.rs @@ -108,7 +108,7 @@ impl AuthMgr { let ensure_user = jwt .custom .ensure_user - .ok_or(ErrorCode::AuthenticateFailure(e.message()))?; + .ok_or_else(|| ErrorCode::AuthenticateFailure(e.message()))?; // create a new user if not exists let mut user_info = UserInfo::new(&user_name, "%", AuthInfo::JWT); if let Some(ref roles) = ensure_user.roles { diff --git a/src/query/service/src/interpreters/interpreter_delete.rs b/src/query/service/src/interpreters/interpreter_delete.rs index 9364179a2670..17fa5ce66d75 100644 --- a/src/query/service/src/interpreters/interpreter_delete.rs +++ b/src/query/service/src/interpreters/interpreter_delete.rs @@ -186,14 +186,13 @@ impl Interpreter for DeleteInterpreter { (None, vec![]) }; - let fuse_table = - tbl.as_any() - .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support DELETE FROM", - tbl.name(), - tbl.get_table_info().engine(), - )))?; + let fuse_table = tbl.as_any().downcast_ref::().ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support DELETE FROM", + tbl.name(), + tbl.get_table_info().engine(), + )) + })?; let mut build_res = PipelineBuildResult::create(); let query_row_id_col = !self.plan.subquery_desc.is_empty(); diff --git a/src/query/service/src/interpreters/interpreter_merge_into.rs b/src/query/service/src/interpreters/interpreter_merge_into.rs index 1257b9c41845..dd37d8bd57e1 100644 --- a/src/query/service/src/interpreters/interpreter_merge_into.rs +++ b/src/query/service/src/interpreters/interpreter_merge_into.rs @@ -232,15 +232,13 @@ impl MergeIntoInterpreter { } let table = self.ctx.get_table(catalog, database, &table_name).await?; - let fuse_table = - table - .as_any() - .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support MERGE INTO", - table.name(), - table.get_table_info().engine(), - )))?; + let fuse_table = table.as_any().downcast_ref::().ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support MERGE INTO", + table.name(), + table.get_table_info().engine(), + )) + })?; let table_info = fuse_table.get_table_info().clone(); let catalog_ = self.ctx.get_catalog(catalog).await?; diff --git a/src/query/service/src/interpreters/interpreter_merge_into_static_filter.rs b/src/query/service/src/interpreters/interpreter_merge_into_static_filter.rs index aaadf3f1ff9d..6c37e519a660 100644 --- 
a/src/query/service/src/interpreters/interpreter_merge_into_static_filter.rs +++ b/src/query/service/src/interpreters/interpreter_merge_into_static_filter.rs @@ -124,15 +124,13 @@ impl MergeIntoInterpreter { } let column_map = m_join.collect_column_map(); - let fuse_table = - table - .as_any() - .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support MERGE INTO", - table.name(), - table.get_table_info().engine(), - )))?; + let fuse_table = table.as_any().downcast_ref::().ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support MERGE INTO", + table.name(), + table.get_table_info().engine(), + )) + })?; let group_expr = match fuse_table.cluster_key_str() { None => { diff --git a/src/query/service/src/interpreters/interpreter_replace.rs b/src/query/service/src/interpreters/interpreter_replace.rs index 7c5999ca1f89..03649660b925 100644 --- a/src/query/service/src/interpreters/interpreter_replace.rs +++ b/src/query/service/src/interpreters/interpreter_replace.rs @@ -169,15 +169,13 @@ impl ReplaceInterpreter { field_index, }) } - let fuse_table = - table - .as_any() - .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support REPLACE INTO", - table.name(), - table.get_table_info().engine(), - )))?; + let fuse_table = table.as_any().downcast_ref::().ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support REPLACE INTO", + table.name(), + table.get_table_info().engine(), + )) + })?; let table_info = fuse_table.get_table_info(); let base_snapshot = fuse_table.read_table_snapshot().await?.unwrap_or_else(|| { diff --git a/src/query/service/src/interpreters/interpreter_table_modify_column.rs b/src/query/service/src/interpreters/interpreter_table_modify_column.rs index 88aad21fe5d5..539dce496147 100644 --- a/src/query/service/src/interpreters/interpreter_table_modify_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_modify_column.rs @@ -198,7 +198,7 @@ impl ModifyTableColumnInterpreter { for (field, _comment) in field_and_comments { let column = &field.name.to_string(); let data_type = &field.data_type; - if let Ok(i) = schema.index_of(column) { + if let Some((i, _)) = schema.column_with_name(column) { if let Some(default_expr) = &field.default_expr { let default_expr = default_expr.to_string(); new_schema.fields[i].data_type = data_type.clone(); @@ -235,7 +235,7 @@ impl ModifyTableColumnInterpreter { for (field, comment) in field_and_comments { let column = &field.name.to_string(); let data_type = &field.data_type; - if let Ok(i) = schema.index_of(column) { + if let Some((i, _)) = schema.column_with_name(column) { if data_type != &new_schema.fields[i].data_type { // Check if this column is referenced by computed columns. 
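
The other recurring change swaps `schema.index_of(name)`, which returns a `Result` and therefore has to manufacture an error for a missing column, for `schema.column_with_name(name)`, which yields an `Option` carrying the index and lets the caller skip absent columns as ordinary control flow. A sketch of that shape with toy schema types (not the real databend API):

use std::collections::BTreeSet;

// Toy schema types; only the shape of the lookup mirrors the diff:
// Option<(index, &field)> instead of a Result that errors on a miss.
struct Field {
    name: String,
}

struct Schema {
    fields: Vec<Field>,
}

impl Schema {
    fn column_with_name(&self, name: &str) -> Option<(usize, &Field)> {
        self.fields.iter().enumerate().find(|(_, f)| f.name == name)
    }
}

fn main() {
    let schema = Schema {
        fields: vec![Field { name: "id".into() }, Field { name: "ts".into() }],
    };
    let mut projections = BTreeSet::new();
    for wanted in ["ts", "missing"] {
        // Absent columns are simply skipped; no error value is built.
        if let Some((index, _)) = schema.column_with_name(wanted) {
            projections.insert(index);
        }
    }
    assert_eq!(projections.into_iter().collect::<Vec<_>>(), vec![1]);
}
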
let mut data_schema: DataSchema = table_info.schema().into(); diff --git a/src/query/service/src/interpreters/interpreter_update.rs b/src/query/service/src/interpreters/interpreter_update.rs index 2e341e3b0bda..dffaeb31eaae 100644 --- a/src/query/service/src/interpreters/interpreter_update.rs +++ b/src/query/service/src/interpreters/interpreter_update.rs @@ -190,14 +190,13 @@ impl Interpreter for UpdateInterpreter { .check_enterprise_enabled(self.ctx.get_license_key(), ComputedColumn)?; } - let fuse_table = - tbl.as_any() - .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support UPDATE", - tbl.name(), - tbl.get_table_info().engine(), - )))?; + let fuse_table = tbl.as_any().downcast_ref::().ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support UPDATE", + tbl.name(), + tbl.get_table_info().engine(), + )) + })?; let mut build_res = PipelineBuildResult::create(); let query_row_id_col = !self.plan.subquery_desc.is_empty(); diff --git a/src/query/service/src/pipelines/processors/transforms/transform_add_stream_columns.rs b/src/query/service/src/pipelines/processors/transforms/transform_add_stream_columns.rs index 190282c2f009..1e52985e5618 100644 --- a/src/query/service/src/pipelines/processors/transforms/transform_add_stream_columns.rs +++ b/src/query/service/src/pipelines/processors/transforms/transform_add_stream_columns.rs @@ -60,7 +60,7 @@ impl Transform for TransformAddStreamColumns { if num_rows != 0 { if let Some(meta) = block.take_meta() { let meta = StreamColumnMeta::downcast_from(meta) - .ok_or(ErrorCode::Internal("It's a bug"))?; + .ok_or_else(|| ErrorCode::Internal("It's a bug"))?; for stream_column in self.stream_columns.iter() { let entry = stream_column.generate_column_values(&meta, num_rows); diff --git a/src/query/service/src/schedulers/fragments/query_fragment_actions.rs b/src/query/service/src/schedulers/fragments/query_fragment_actions.rs index 11fe12ac88c3..b6cddd563ce2 100644 --- a/src/query/service/src/schedulers/fragments/query_fragment_actions.rs +++ b/src/query/service/src/schedulers/fragments/query_fragment_actions.rs @@ -130,9 +130,11 @@ impl QueryFragmentsActions { } pub fn get_root_actions(&self) -> Result<&QueryFragmentActions> { - self.fragments_actions.last().ok_or(ErrorCode::Internal( - "Logical error, call get_root_actions in empty QueryFragmentsActions", - )) + self.fragments_actions.last().ok_or_else(|| { + ErrorCode::Internal( + "Logical error, call get_root_actions in empty QueryFragmentsActions", + ) + }) } pub fn pop_root_actions(&mut self) -> Option { diff --git a/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs b/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs index ac5d9f54143e..272a52384a5d 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs +++ b/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs @@ -64,7 +64,7 @@ impl FlightSqlServiceImpl { pub(super) fn get_user_password(metadata: &MetadataMap) -> Result<(String, String), String> { let basic = "Basic "; let authorization = Self::get_header_value(metadata, "authorization") - .ok_or_else(|| "authorization not parsable".to_string())?; + .ok_or("authorization not parsable".to_string())?; if !authorization.starts_with(basic) { return Err(format!("Auth type not implemented: {authorization}")); diff --git a/src/query/service/src/table_functions/infer_schema/table_args.rs 
b/src/query/service/src/table_functions/infer_schema/table_args.rs index ce01efc219c0..d7a38f77804c 100644 --- a/src/query/service/src/table_functions/infer_schema/table_args.rs +++ b/src/query/service/src/table_functions/infer_schema/table_args.rs @@ -62,9 +62,8 @@ impl InferSchemaArgsParsed { } } - let location = location.ok_or(ErrorCode::BadArguments( - "infer_schema must specify location", - ))?; + let location = location + .ok_or_else(|| ErrorCode::BadArguments("infer_schema must specify location"))?; Ok(Self { location, diff --git a/src/query/service/src/table_functions/list_stage/table_args.rs b/src/query/service/src/table_functions/list_stage/table_args.rs index bd31829a7218..a1e81b5317f9 100644 --- a/src/query/service/src/table_functions/list_stage/table_args.rs +++ b/src/query/service/src/table_functions/list_stage/table_args.rs @@ -61,7 +61,7 @@ impl ListStageArgsParsed { } let location = - location.ok_or(ErrorCode::BadArguments("list_stage must specify location"))?; + location.ok_or_else(|| ErrorCode::BadArguments("list_stage must specify location"))?; Ok(Self { location, diff --git a/src/query/service/src/table_functions/numbers/numbers_part.rs b/src/query/service/src/table_functions/numbers/numbers_part.rs index e4de4a3a508b..9c03228c988f 100644 --- a/src/query/service/src/table_functions/numbers/numbers_part.rs +++ b/src/query/service/src/table_functions/numbers/numbers_part.rs @@ -58,9 +58,7 @@ impl NumbersPartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&NumbersPartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to NumbersPartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to NumbersPartInfo.")) } } diff --git a/src/query/service/tests/it/sql/exec/mod.rs b/src/query/service/tests/it/sql/exec/mod.rs index 7e8bdb04b8ff..58a3ef1db40f 100644 --- a/src/query/service/tests/it/sql/exec/mod.rs +++ b/src/query/service/tests/it/sql/exec/mod.rs @@ -105,22 +105,26 @@ pub async fn test_snapshot_consistency() -> Result<()> { let fuse_table0 = table0 .as_any() .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support", - table0.name(), - table0.get_table_info().engine(), - ))) + .ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support", + table0.name(), + table0.get_table_info().engine(), + )) + }) .unwrap(); let snapshot0 = fuse_table0.read_table_snapshot().await?; let fuse_table1 = table1 .as_any() .downcast_ref::() - .ok_or(ErrorCode::Unimplemented(format!( - "table {}, engine type {}, does not support", - table1.name(), - table1.get_table_info().engine(), - ))) + .ok_or_else(|| { + ErrorCode::Unimplemented(format!( + "table {}, engine type {}, does not support", + table1.name(), + table1.get_table_info().engine(), + )) + }) .unwrap(); let snapshot1 = fuse_table1.read_table_snapshot().await?; diff --git a/src/query/sql/src/executor/physical_plans/physical_eval_scalar.rs b/src/query/sql/src/executor/physical_plans/physical_eval_scalar.rs index 51a5f356e5f5..d57887867e45 100644 --- a/src/query/sql/src/executor/physical_plans/physical_eval_scalar.rs +++ b/src/query/sql/src/executor/physical_plans/physical_eval_scalar.rs @@ -131,7 +131,7 @@ impl PhysicalPlanBuilder { let mut projections = ColumnSet::new(); for column in column_projections.iter() { - if let Ok(index) = input_schema.index_of(&column.to_string()) { + if let Some((index, _)) = input_schema.column_with_name(&column.to_string()) { 
projections.insert(index); } } diff --git a/src/query/sql/src/executor/physical_plans/physical_filter.rs b/src/query/sql/src/executor/physical_plans/physical_filter.rs index a29211dd9b88..cfc6f7285156 100644 --- a/src/query/sql/src/executor/physical_plans/physical_filter.rs +++ b/src/query/sql/src/executor/physical_plans/physical_filter.rs @@ -72,7 +72,7 @@ impl PhysicalPlanBuilder { let input_schema = input.output_schema()?; let mut projections = ColumnSet::new(); for column in column_projections.iter() { - if let Ok(index) = input_schema.index_of(&column.to_string()) { + if let Some((index, _)) = input_schema.column_with_name(&column.to_string()) { projections.insert(index); } } diff --git a/src/query/sql/src/executor/physical_plans/physical_hash_join.rs b/src/query/sql/src/executor/physical_plans/physical_hash_join.rs index 0f89416c33ea..fa2127c585d1 100644 --- a/src/query/sql/src/executor/physical_plans/physical_hash_join.rs +++ b/src/query/sql/src/executor/physical_plans/physical_hash_join.rs @@ -250,17 +250,17 @@ impl PhysicalPlanBuilder { let mut probe_projections = ColumnSet::new(); let mut build_projections = ColumnSet::new(); for column in pre_column_projections { - if let Ok(index) = probe_schema.index_of(&column.to_string()) { + if let Some((index, _)) = probe_schema.column_with_name(&column.to_string()) { probe_projections.insert(index); } - if let Ok(index) = build_schema.index_of(&column.to_string()) { + if let Some((index, _)) = build_schema.column_with_name(&column.to_string()) { build_projections.insert(index); } } // for distributed merge into, there is a field called "_row_number", but // it's not an internal row_number, we need to add it here - if let Ok(index) = build_schema.index_of(ROW_NUMBER_COL_NAME) { + if let Some((index, _)) = build_schema.column_with_name(ROW_NUMBER_COL_NAME) { build_projections.insert(index); } @@ -350,14 +350,14 @@ impl PhysicalPlanBuilder { let mut projections = ColumnSet::new(); let projected_schema = DataSchemaRefExt::create(merged_fields.clone()); for column in column_projections.iter() { - if let Ok(index) = projected_schema.index_of(&column.to_string()) { + if let Some((index, _)) = projected_schema.column_with_name(&column.to_string()) { projections.insert(index); } } // for distributed merge into, there is a field called "_row_number", but // it's not an internal row_number, we need to add it here - if let Ok(index) = projected_schema.index_of(ROW_NUMBER_COL_NAME) { + if let Some((index, _)) = projected_schema.column_with_name(ROW_NUMBER_COL_NAME) { projections.insert(index); } diff --git a/src/query/sql/src/executor/physical_plans/physical_project_set.rs b/src/query/sql/src/executor/physical_plans/physical_project_set.rs index 04c3b19faa42..835e5c9d4175 100644 --- a/src/query/sql/src/executor/physical_plans/physical_project_set.rs +++ b/src/query/sql/src/executor/physical_plans/physical_project_set.rs @@ -91,7 +91,7 @@ impl PhysicalPlanBuilder { let mut projections = ColumnSet::new(); for column in column_projections.iter() { - if let Ok(index) = input_schema.index_of(&column.to_string()) { + if let Some((index, _)) = input_schema.column_with_name(&column.to_string()) { projections.insert(index); } } diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index c0ef8ee4c877..799b301fb393 100644 --- a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -1202,7 +1202,7 @@ impl Binder { let options = table.options(); let table_version = options 
.get("table_version") - .ok_or(ErrorCode::Internal("table version must be set in stream"))? + .ok_or_else(|| ErrorCode::Internal("table version must be set in stream"))? .parse::()?; Some(table_version) } else { diff --git a/src/query/storages/common/locks/src/lock_manager.rs b/src/query/storages/common/locks/src/lock_manager.rs index d521b2634f13..1ba571a023dd 100644 --- a/src/query/storages/common/locks/src/lock_manager.rs +++ b/src/query/storages/common/locks/src/lock_manager.rs @@ -122,7 +122,7 @@ impl LockManager { let reply = catalog .list_lock_revisions(list_table_lock_req.clone()) .await?; - let position = reply.iter().position(|(x, _)| *x == revision).ok_or( + let position = reply.iter().position(|(x, _)| *x == revision).ok_or_else(|| // If the current is not found in list, it means that the current has been expired. ErrorCode::TableLockExpired("the acquired table lock has been expired".to_string()), )?; diff --git a/src/query/storages/common/pruner/src/block_meta.rs b/src/query/storages/common/pruner/src/block_meta.rs index 717e1884abb3..2e3d793236b6 100644 --- a/src/query/storages/common/pruner/src/block_meta.rs +++ b/src/query/storages/common/pruner/src/block_meta.rs @@ -51,8 +51,8 @@ impl BlockMetaInfo for BlockMetaIndex { impl BlockMetaIndex { pub fn from_meta(info: &BlockMetaInfoPtr) -> Result<&BlockMetaIndex> { - BlockMetaIndex::downcast_ref_from(info).ok_or(ErrorCode::Internal( - "Cannot downcast from BlockMetaInfo to BlockMetaIndex.", - )) + BlockMetaIndex::downcast_ref_from(info).ok_or_else(|| { + ErrorCode::Internal("Cannot downcast from BlockMetaInfo to BlockMetaIndex.") + }) } } diff --git a/src/query/storages/fuse/src/fuse_part.rs b/src/query/storages/fuse/src/fuse_part.rs index 5eead498e4e7..6f68dcd5db9e 100644 --- a/src/query/storages/fuse/src/fuse_part.rs +++ b/src/query/storages/fuse/src/fuse_part.rs @@ -90,9 +90,7 @@ impl FusePartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&FusePartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to FusePartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to FusePartInfo.")) } pub fn range(&self) -> Option<&Range> { diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs index 5d0d9be67433..f864744f0397 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs @@ -115,7 +115,7 @@ impl BlockReader { let column_range = raw_range.start..raw_range.end; // Find the range index and Range from merged ranges. 
- let (merged_range_idx, merged_range) = range_merger.get(column_range.clone()).ok_or(ErrorCode::Internal(format!( + let (merged_range_idx, merged_range) = range_merger.get(column_range.clone()).ok_or_else(||ErrorCode::Internal(format!( "It's a terrible bug, not found raw range:[{:?}], path:{} from merged ranges\n: {:?}", column_range, location, merged_ranges )))?; diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_sync.rs b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_sync.rs index 0d3347dc5108..443a517c1672 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_sync.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_sync.rs @@ -80,7 +80,7 @@ impl BlockReader { let column_range = raw_range.start..raw_range.end; // Find the range index and Range from merged ranges. - let (merged_range_idx, merged_range) = range_merger.get(column_range.clone()).ok_or(ErrorCode::Internal(format!( + let (merged_range_idx, merged_range) = range_merger.get(column_range.clone()).ok_or_else(||ErrorCode::Internal(format!( "It's a terrible bug, not found raw range:[{:?}], path:{} from merged ranges\n: {:?}", column_range, path, merged_ranges )))?; diff --git a/src/query/storages/fuse/src/io/write/meta_writer.rs b/src/query/storages/fuse/src/io/write/meta_writer.rs index ea4ae9522a34..2cc8d96df09f 100644 --- a/src/query/storages/fuse/src/io/write/meta_writer.rs +++ b/src/query/storages/fuse/src/io/write/meta_writer.rs @@ -105,8 +105,8 @@ impl Marshal for TableSnapshotStatistics { #[cfg(test)] mod tests { use std::collections::HashMap; - use std::panic::catch_unwind; + use common_base::runtime::catch_unwind; use common_expression::TableSchema; use storages_common_table_meta::meta::SnapshotId; use storages_common_table_meta::meta::Statistics; diff --git a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs index 4eb6727c2408..74d0786f3581 100644 --- a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs +++ b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs @@ -160,12 +160,12 @@ where F: SnapshotGenerator + Send + 'static .unwrap()? .get_meta() .cloned() - .ok_or(ErrorCode::Internal("No block meta. It's a bug"))?; + .ok_or_else(|| ErrorCode::Internal("No block meta. It's a bug"))?; self.input.finish(); let meta = CommitMeta::downcast_from(input_meta) - .ok_or(ErrorCode::Internal("No commit meta. It's a bug"))?; + .ok_or_else(|| ErrorCode::Internal("No commit meta. 
It's a bug"))?; self.abort_operation = meta.abort_operation; diff --git a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs index fa21b67dab48..c9fa04ab6f70 100644 --- a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs +++ b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs @@ -187,8 +187,8 @@ impl Processor for TransformSerializeBlock { let mut input_data = self.input.pull_data().unwrap()?; let meta = input_data.take_meta(); if let Some(meta) = meta { - let meta = - SerializeDataMeta::downcast_from(meta).ok_or(ErrorCode::Internal("It's a bug"))?; + let meta = SerializeDataMeta::downcast_from(meta) + .ok_or_else(|| ErrorCode::Internal("It's a bug"))?; match meta { SerializeDataMeta::DeletedSegment(deleted_segment) => { // delete a whole segment, segment level diff --git a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_segment.rs b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_segment.rs index 4fafaff6e0fb..f4a78e000e33 100644 --- a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_segment.rs +++ b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_segment.rs @@ -165,9 +165,9 @@ impl Processor for TransformSerializeSegment { .unwrap()? .get_meta() .cloned() - .ok_or(ErrorCode::Internal("No block meta. It's a bug"))?; + .ok_or_else(|| ErrorCode::Internal("No block meta. It's a bug"))?; let block_meta = BlockMeta::downcast_ref_from(&input_meta) - .ok_or(ErrorCode::Internal("No commit meta. It's a bug"))? + .ok_or_else(|| ErrorCode::Internal("No commit meta. It's a bug"))? 
.clone(); self.accumulator.add_with_block_meta(block_meta); diff --git a/src/query/storages/fuse/src/operations/common/snapshot_generator.rs b/src/query/storages/fuse/src/operations/common/snapshot_generator.rs index 91672ef03294..b24fb24996c7 100644 --- a/src/query/storages/fuse/src/operations/common/snapshot_generator.rs +++ b/src/query/storages/fuse/src/operations/common/snapshot_generator.rs @@ -227,7 +227,7 @@ impl SnapshotGenerator for MutationGenerator { let ctx = self .conflict_resolve_ctx .as_ref() - .ok_or(ErrorCode::Internal("conflict_solve_ctx not set"))?; + .ok_or_else(|| ErrorCode::Internal("conflict_solve_ctx not set"))?; match ctx { ConflictResolveContext::AppendOnly(_) => { return Err(ErrorCode::Internal( @@ -325,7 +325,7 @@ impl AppendGenerator { let ctx = self .conflict_resolve_ctx .as_ref() - .ok_or(ErrorCode::Internal("conflict_solve_ctx not set"))?; + .ok_or_else(|| ErrorCode::Internal("conflict_solve_ctx not set"))?; match ctx { ConflictResolveContext::AppendOnly((ctx, schema)) => Ok((ctx, schema.as_ref())), _ => Err(ErrorCode::Internal( diff --git a/src/query/storages/fuse/src/operations/mutation/compact/compact_part.rs b/src/query/storages/fuse/src/operations/mutation/compact/compact_part.rs index 0a828527f1fa..7a4893a1169e 100644 --- a/src/query/storages/fuse/src/operations/mutation/compact/compact_part.rs +++ b/src/query/storages/fuse/src/operations/mutation/compact/compact_part.rs @@ -97,9 +97,7 @@ impl CompactPartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&CompactPartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to CompactPartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to CompactPartInfo.")) } } diff --git a/src/query/storages/fuse/src/operations/mutation/mutation_part.rs b/src/query/storages/fuse/src/operations/mutation/mutation_part.rs index 269af79dcc2f..68680c78fcf4 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutation_part.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutation_part.rs @@ -59,9 +59,7 @@ impl Mutation { pub fn from_part(info: &PartInfoPtr) -> Result<&Mutation> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to Mutation.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to Mutation.")) } } diff --git a/src/query/storages/fuse/src/operations/navigate.rs b/src/query/storages/fuse/src/operations/navigate.rs index 249b7b2f1054..f16e66d24314 100644 --- a/src/query/storages/fuse/src/operations/navigate.rs +++ b/src/query/storages/fuse/src/operations/navigate.rs @@ -239,9 +239,9 @@ impl FuseTable { modified <= retention_point }) .await?; - let location = location.ok_or(ErrorCode::TableHistoricalDataNotFound( - "No historical data found at given point", - ))?; + let location = location.ok_or_else(|| { + ErrorCode::TableHistoricalDataNotFound("No historical data found at given point") + })?; Ok((location, files)) } diff --git a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs index 27bbb6896739..6d8a7abe5def 100644 --- a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs +++ b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs @@ -213,7 +213,7 @@ impl<'a> ClusteringInformation<'a> { let cluster_key = self .table .cluster_key_str() 
- .ok_or(ErrorCode::Internal("It's a bug"))?; + .ok_or_else(|| ErrorCode::Internal("It's a bug"))?; Ok(DataBlock::new( vec![ BlockEntry::new( diff --git a/src/query/storages/hive/hive/src/hive_block_filter.rs b/src/query/storages/hive/hive/src/hive_block_filter.rs index 6fb1ec36bdb0..7e1a1f9a44dd 100644 --- a/src/query/storages/hive/hive/src/hive_block_filter.rs +++ b/src/query/storages/hive/hive/src/hive_block_filter.rs @@ -87,8 +87,9 @@ impl HiveBlockFilter { in_memory_size as u64, None, ); - if let Ok(idx) = self.data_schema.index_of(col.name()) { - statistics.insert(idx as u32, col_stats); + if let Some((index, _)) = self.data_schema.column_with_name(col.name()) + { + statistics.insert(index as u32, col_stats); } } } @@ -96,7 +97,7 @@ impl HiveBlockFilter { } for (p_key, p_value) in part_columns { - if let Ok(idx) = self.data_schema.index_of(&p_key) { + if let Some((idx, _)) = self.data_schema.column_with_name(&p_key) { let mut null_count = 0; let v = if p_value == HIVE_DEFAULT_PARTITION { null_count = row_group.num_rows(); diff --git a/src/query/storages/hive/hive/src/hive_partition.rs b/src/query/storages/hive/hive/src/hive_partition.rs index 8d7e028b10a2..7888396ecda3 100644 --- a/src/query/storages/hive/hive/src/hive_partition.rs +++ b/src/query/storages/hive/hive/src/hive_partition.rs @@ -80,9 +80,7 @@ impl HivePartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&HivePartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to HivePartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to HivePartInfo.")) } } diff --git a/src/query/storages/hive/hive/src/hive_table.rs b/src/query/storages/hive/hive/src/hive_table.rs index 8aa4fad64985..45cc656e7811 100644 --- a/src/query/storages/hive/hive/src/hive_table.rs +++ b/src/query/storages/hive/hive/src/hive_table.rs @@ -449,14 +449,9 @@ impl HiveTable { ctx: Arc, push_downs: &Option, ) -> Result)>> { - let path = self - .table_options - .location - .as_ref() - .ok_or(ErrorCode::TableInfoError(format!( - "{}, table location is empty", - self.table_info.name - )))?; + let path = self.table_options.location.as_ref().ok_or_else(|| { + ErrorCode::TableInfoError(format!("{}, table location is empty", self.table_info.name)) + })?; if let Some(partition_keys) = &self.table_options.partition_keys { if !partition_keys.is_empty() { diff --git a/src/query/storages/iceberg/src/partition.rs b/src/query/storages/iceberg/src/partition.rs index 8b1ff25e5041..4824bac3be6c 100644 --- a/src/query/storages/iceberg/src/partition.rs +++ b/src/query/storages/iceberg/src/partition.rs @@ -32,9 +32,7 @@ impl IcebergPartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&IcebergPartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to IcebergPartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to IcebergPartInfo.")) } } diff --git a/src/query/storages/parquet/src/parquet_part.rs b/src/query/storages/parquet/src/parquet_part.rs index 6eeb836eda63..86dd968b65f4 100644 --- a/src/query/storages/parquet/src/parquet_part.rs +++ b/src/query/storages/parquet/src/parquet_part.rs @@ -97,9 +97,7 @@ impl ParquetPart { pub fn from_part(info: &PartInfoPtr) -> Result<&ParquetPart> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to ParquetPart.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to ParquetPart.")) } } diff --git 
a/src/query/storages/parquet/src/parquet_rs/parquet_reader/utils.rs b/src/query/storages/parquet/src/parquet_rs/parquet_reader/utils.rs index d6fd0585ebdd..63e9d68959e0 100644 --- a/src/query/storages/parquet/src/parquet_rs/parquet_reader/utils.rs +++ b/src/query/storages/parquet/src/parquet_rs/parquet_reader/utils.rs @@ -42,16 +42,16 @@ fn traverse_column( for idx in path.iter().take(path.len() - 1) { let struct_array = columns .get(*idx) - .ok_or(error_cannot_traverse_path(path, schema))? + .ok_or_else(|| error_cannot_traverse_path(path, schema))? .as_any() .downcast_ref::() - .ok_or(error_cannot_traverse_path(path, schema))?; + .ok_or_else(|| error_cannot_traverse_path(path, schema))?; columns = struct_array.columns(); } let idx = *path.last().unwrap(); let array = columns .get(idx) - .ok_or(error_cannot_traverse_path(path, schema))?; + .ok_or_else(|| error_cannot_traverse_path(path, schema))?; Ok(Column::from_arrow_rs(array.clone(), field)?) } @@ -139,7 +139,7 @@ pub fn compute_output_field_paths( let idx = fields .iter() .position(|t| t.name().eq_ignore_ascii_case(name)) - .ok_or(error_cannot_find_field(field.name(), parquet_schema))?; + .ok_or_else(|| error_cannot_find_field(field.name(), parquet_schema))?; path.push(idx); ty = &fields[idx]; } diff --git a/src/query/storages/random/src/random_parts.rs b/src/query/storages/random/src/random_parts.rs index 69fec856d73e..830f9a1c7c31 100644 --- a/src/query/storages/random/src/random_parts.rs +++ b/src/query/storages/random/src/random_parts.rs @@ -50,8 +50,6 @@ impl RandomPartInfo { pub fn from_part(info: &PartInfoPtr) -> Result<&RandomPartInfo> { info.as_any() .downcast_ref::() - .ok_or(ErrorCode::Internal( - "Cannot downcast from PartInfo to RandomPartInfo.", - )) + .ok_or_else(|| ErrorCode::Internal("Cannot downcast from PartInfo to RandomPartInfo.")) } } diff --git a/src/query/storages/system/src/log_queue.rs b/src/query/storages/system/src/log_queue.rs index 692c1f569744..d68ca9a9734d 100644 --- a/src/query/storages/system/src/log_queue.rs +++ b/src/query/storages/system/src/log_queue.rs @@ -94,7 +94,7 @@ impl SystemLogQueue { Some(instance) => instance .downcast_ref::>() .cloned() - .ok_or(ErrorCode::Internal("")), + .ok_or_else(|| ErrorCode::Internal("")), } } } diff --git a/src/query/users/src/jwt/jwk.rs b/src/query/users/src/jwt/jwk.rs index 863ed8b7b52a..b72a0544c904 100644 --- a/src/query/users/src/jwt/jwk.rs +++ b/src/query/users/src/jwt/jwk.rs @@ -152,13 +152,9 @@ impl JwkKeyStore { self.maybe_reload_keys().await?; let keys = self.keys.read(); match key_id { - Some(kid) => keys - .get(&kid) - .cloned() - .ok_or(ErrorCode::AuthenticateFailure(format!( - "key id {} not found", - &kid - ))), + Some(kid) => keys.get(&kid).cloned().ok_or_else(|| { + ErrorCode::AuthenticateFailure(format!("key id {} not found", &kid)) + }), None => { if keys.len() != 1 { Err(ErrorCode::AuthenticateFailure( From 05c79691dd289a0271f53cca6c1add9b3ba51845 Mon Sep 17 00:00:00 2001 From: everpcpc Date: Wed, 6 Dec 2023 09:22:42 +0800 Subject: [PATCH 15/16] chore(ci): fix install zig (#13933) --- scripts/setup/dev_setup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/setup/dev_setup.sh b/scripts/setup/dev_setup.sh index 69bdc623d24f..6547559e7c7f 100755 --- a/scripts/setup/dev_setup.sh +++ b/scripts/setup/dev_setup.sh @@ -112,8 +112,9 @@ function install_ziglang { apt-get | yum | dnf | pacman) curl -sSfLo /tmp/zig.tar.xz "https://ziglang.org/download/0.11.0/zig-linux-${arch}-0.11.0.tar.xz" tar -xf /tmp/zig.tar.xz -C /tmp - 
"${PRE_COMMAND[@]}" cp "/tmp/zig-linux-${arch}-0.11.0/zig" /usr/local/bin/ + "${PRE_COMMAND[@]}" mv "/tmp/zig-linux-${arch}-0.11.0/zig" /usr/local/bin/ "${PRE_COMMAND[@]}" chmod +x /usr/local/bin/zig + "${PRE_COMMAND[@]}" mv "/tmp/zig-linux-${arch}-0.11.0/lib" /usr/local/lib/zig rm -rf /tmp/zig* ;; brew) From a2c8a80ccce6e7d3691dfa25974da9e590e40582 Mon Sep 17 00:00:00 2001 From: RinChanNOW Date: Wed, 6 Dec 2023 09:50:32 +0800 Subject: [PATCH 16/16] refactor: abstract `TransformSortMergeBase` to unify merge sort logic. (#13927) --- .../src/processors/transforms/mod.rs | 1 + .../processors/transforms/sort/rows/common.rs | 2 + .../processors/transforms/sort/rows/mod.rs | 1 + .../processors/transforms/sort/rows/simple.rs | 11 + .../transforms/transform_accumulating.rs | 6 + .../transforms/transform_sort_merge.rs | 299 ++++++------------ .../transforms/transform_sort_merge_base.rs | 162 ++++++++++ .../transforms/transform_sort_merge_limit.rs | 237 +++++--------- .../suites/mode/standalone/explain/sort.test | 8 +- .../mode/standalone/explain/window.test | 8 +- 10 files changed, 372 insertions(+), 363 deletions(-) create mode 100644 src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_base.rs diff --git a/src/query/pipeline/transforms/src/processors/transforms/mod.rs b/src/query/pipeline/transforms/src/processors/transforms/mod.rs index 7373b394e855..1869e9067323 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/mod.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/mod.rs @@ -23,6 +23,7 @@ mod transform_blocking; mod transform_compact; mod transform_dummy; mod transform_multi_sort_merge; +mod transform_sort_merge_base; pub mod transform_sort; mod transform_sort_merge; diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs index 80ec344e25cd..e580034e127a 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/common.rs @@ -31,6 +31,8 @@ use jsonb::convert_to_comparable; use super::RowConverter; use super::Rows; +pub type CommonRows = StringColumn; + impl Rows for StringColumn { type Item<'a> = &'a [u8]; diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs index be3d0a4dc05f..0ca92a3dceb6 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/mod.rs @@ -17,6 +17,7 @@ mod simple; use std::sync::Arc; +pub use common::*; use common_exception::Result; use common_expression::BlockEntry; use common_expression::Column; diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs index c9223bed9b56..bf02daa0309e 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/rows/simple.rs @@ -18,6 +18,9 @@ use std::marker::PhantomData; use common_exception::ErrorCode; use common_exception::Result; use common_expression::types::ArgType; +use common_expression::types::DateType; +use common_expression::types::StringType; +use common_expression::types::TimestampType; use common_expression::types::ValueType; use 
common_expression::BlockEntry; use common_expression::Column; @@ -29,6 +32,10 @@ use common_expression::Value; use super::RowConverter; use super::Rows; +pub type DateRows = SimpleRows; +pub type TimestampRows = SimpleRows; +pub type StringRows = SimpleRows; + /// Row structure for single simple types. (numbers, date, timestamp) #[derive(Clone, Copy)] pub struct SimpleRow { @@ -116,6 +123,10 @@ where } } +pub type DateConverter = SimpleRowConverter; +pub type TimestampConverter = SimpleRowConverter; +pub type StringConverter = SimpleRowConverter; + /// If there is only one sort field and its type is a primitive type, /// use this converter. pub struct SimpleRowConverter { diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_accumulating.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_accumulating.rs index 135ed03fa261..36f5dfe269e4 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_accumulating.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_accumulating.rs @@ -34,6 +34,8 @@ pub trait AccumulatingTransform: Send { fn on_finish(&mut self, _output: bool) -> Result> { Ok(vec![]) } + + fn interrupt(&self) {} } pub struct AccumulatingTransformer { @@ -133,6 +135,10 @@ impl Processor for AccumulatingTransformer: Send + 'static { diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs index 69ec2ae70be0..e0044f1cf7fa 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge.rs @@ -15,195 +15,94 @@ use std::cmp::Reverse; use std::collections::BinaryHeap; use std::intrinsics::unlikely; -use std::marker::PhantomData; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::Arc; use common_exception::ErrorCode; use common_exception::Result; -use common_expression::row::RowConverter as CommonRowConverter; -use common_expression::types::string::StringColumn; +use common_expression::row::RowConverter as CommonConverter; use common_expression::types::DataType; -use common_expression::types::DateType; use common_expression::types::NumberDataType; use common_expression::types::NumberType; -use common_expression::types::StringType; -use common_expression::types::TimestampType; use common_expression::with_number_mapped_type; -use common_expression::BlockEntry; use common_expression::DataBlock; use common_expression::DataSchemaRef; use common_expression::SortColumnDescription; -use common_expression::Value; use common_pipeline_core::processors::InputPort; use common_pipeline_core::processors::OutputPort; use common_pipeline_core::processors::Processor; +use super::sort::CommonRows; use super::sort::Cursor; -use super::sort::RowConverter; +use super::sort::DateConverter; +use super::sort::DateRows; use super::sort::Rows; use super::sort::SimpleRowConverter; use super::sort::SimpleRows; -use super::Compactor; -use super::TransformCompact; +use super::sort::StringConverter; +use super::sort::StringRows; +use super::sort::TimestampConverter; +use super::sort::TimestampRows; +use super::transform_sort_merge_base::MergeSort; +use super::transform_sort_merge_base::Status; +use super::transform_sort_merge_base::TransformSortMergeBase; +use super::AccumulatingTransform; +use super::AccumulatingTransformer; /// Merge sort blocks without limit. 
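
The merge-sort refactor below moves sorting onto the accumulate-then-flush interface: `transform` is called once per incoming block and may emit nothing, `on_finish` flushes whatever was buffered, and the newly added `interrupt` hook gives implementations a default no-op cancellation point. A toy sketch of that interface shape (simplified; the real trait works on `DataBlock`s, returns `Result`s and drives pipeline ports):

// Simplified sketch of the accumulate-then-flush interface.
trait Accumulating {
    type In;
    type Out;
    // Called once per incoming item; may emit nothing yet.
    fn transform(&mut self, input: Self::In) -> Vec<Self::Out>;
    // Called when upstream is exhausted; flushes whatever was buffered.
    fn on_finish(&mut self) -> Vec<Self::Out>;
    // Cooperative cancellation hook with a no-op default, as in the patch.
    fn interrupt(&self) {}
}

struct SortAccumulator {
    buffer: Vec<i32>,
}

impl Accumulating for SortAccumulator {
    type In = Vec<i32>;
    type Out = Vec<i32>;

    fn transform(&mut self, input: Vec<i32>) -> Vec<Vec<i32>> {
        self.buffer.extend(input);
        vec![] // keep accumulating
    }

    fn on_finish(&mut self) -> Vec<Vec<i32>> {
        self.buffer.sort_unstable();
        vec![std::mem::take(&mut self.buffer)]
    }
}

fn main() {
    let mut acc = SortAccumulator { buffer: vec![] };
    assert!(acc.transform(vec![3, 1]).is_empty());
    assert!(acc.transform(vec![2]).is_empty());
    assert_eq!(acc.on_finish(), vec![vec![1, 2, 3]]);
}
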
/// /// For merge sort with limit, see [`super::transform_sort_merge_limit`] -pub struct SortMergeCompactor { +pub struct TransformSortMerge { block_size: usize, - row_converter: Converter, - sort_desc: Vec, + heap: BinaryHeap>>, + buffer: Vec, aborting: Arc, - - /// If the next transform of current transform is [`super::transform_multi_sort_merge::MultiSortMergeProcessor`], - /// we can generate and output the order column to avoid the extra converting in the next transform. - output_order_col: bool, - /// If this transform is after an Exchange transform, - /// it means it will compact the data from cluster nodes. - /// And the order column is already generated in each cluster node, - /// so we don't need to generate the order column again. - order_col_generated: bool, - - _c: PhantomData, - _r: PhantomData, } -impl SortMergeCompactor -where - R: Rows, - Converter: RowConverter, -{ - pub fn try_create( - schema: DataSchemaRef, - block_size: usize, - sort_desc: Vec, - order_col_generated: bool, - output_order_col: bool, - ) -> Result { - debug_assert!(if order_col_generated { - // If the order column is already generated, - // it means this transform is after a exchange source and it's the last transform for sorting. - // We should remove the order column. - !output_order_col - } else { - true - }); - - let row_converter = Converter::create(&sort_desc, schema)?; - Ok(SortMergeCompactor { - row_converter, +impl TransformSortMerge { + pub fn create(block_size: usize) -> Self { + TransformSortMerge { block_size, - sort_desc, + heap: BinaryHeap::new(), + buffer: vec![], aborting: Arc::new(AtomicBool::new(false)), - order_col_generated, - output_order_col, - _c: PhantomData, - _r: PhantomData, - }) + } } } -impl Compactor for SortMergeCompactor -where - R: Rows, - Converter: RowConverter, -{ - fn name() -> &'static str { - "SortMergeTransform" - } +impl MergeSort for TransformSortMerge { + const NAME: &'static str = "TransformSortMerge"; - fn interrupt(&self) { - self.aborting.store(true, Ordering::Release); - } - - fn compact_final(&mut self, blocks: Vec) -> Result> { - if blocks.is_empty() { - return Ok(vec![]); + fn add_block(&mut self, block: DataBlock, init_cursor: Cursor) -> Result { + if unlikely(self.aborting.load(Ordering::Relaxed)) { + return Err(ErrorCode::AbortedQuery( + "Aborted query, because the server is shutting down or the query was killed.", + )); } - let output_size = blocks.iter().map(|b| b.num_rows()).sum::(); - if output_size == 0 { - return Ok(vec![]); + if unlikely(block.is_empty()) { + return Ok(Status::Continue); } - let mut blocks = blocks - .into_iter() - .filter(|b| !b.is_empty()) - .collect::>(); + self.buffer.push(block); + self.heap.push(Reverse(init_cursor)); + Ok(Status::Continue) + } - if blocks.len() == 1 { - let block = blocks - .get_mut(0) - .ok_or_else(|| ErrorCode::Internal("It's a bug"))?; - if self.order_col_generated { - // Need to remove order column. 
- block.pop_columns(1); - return Ok(blocks); - } - if self.output_order_col { - let columns = self - .sort_desc - .iter() - .map(|d| block.get_by_offset(d.offset).clone()) - .collect::>(); - let rows = self.row_converter.convert(&columns, block.num_rows())?; - let order_col = rows.to_column(); - if self.output_order_col { - block.add_column(BlockEntry { - data_type: order_col.data_type(), - value: Value::Column(order_col), - }); - } - } - return Ok(blocks); + fn on_finish(&mut self) -> Result> { + let output_size = self.buffer.iter().map(|b| b.num_rows()).sum::(); + if output_size == 0 { + return Ok(vec![]); } let output_block_num = output_size.div_ceil(self.block_size); let mut output_blocks = Vec::with_capacity(output_block_num); let mut output_indices = Vec::with_capacity(output_size); - let mut heap: BinaryHeap>> = BinaryHeap::with_capacity(blocks.len()); - // 1. Put all blocks into a min-heap. - for (i, block) in blocks.iter_mut().enumerate() { - let rows = if self.order_col_generated { - let order_col = block - .columns() - .last() - .unwrap() - .value - .as_column() - .unwrap() - .clone(); - let rows = R::from_column(order_col, &self.sort_desc) - .ok_or_else(|| ErrorCode::BadDataValueType("Order column type mismatched."))?; - // Need to remove order column. - block.pop_columns(1); - rows - } else { - let columns = self - .sort_desc - .iter() - .map(|d| block.get_by_offset(d.offset).clone()) - .collect::>(); - let rows = self.row_converter.convert(&columns, block.num_rows())?; - if self.output_order_col { - let order_col = rows.to_column(); - block.add_column(BlockEntry { - data_type: order_col.data_type(), - value: Value::Column(order_col), - }); - } - rows - }; - let cursor = Cursor::new(i, rows); - heap.push(Reverse(cursor)); - } - - // 2. Drain the heap - while let Some(Reverse(mut cursor)) = heap.pop() { + // 1. Drain the heap + while let Some(Reverse(mut cursor)) = self.heap.pop() { if unlikely(self.aborting.load(Ordering::Relaxed)) { return Err(ErrorCode::AbortedQuery( "Aborted query, because the server is shutting down or the query was killed.", @@ -211,13 +110,13 @@ where } let block_idx = cursor.input_index; - if heap.is_empty() { + if self.heap.is_empty() { // If there is no other block in the heap, we can drain the whole block. while !cursor.is_finished() { output_indices.push((block_idx, cursor.advance())); } } else { - let next_cursor = &heap.peek().unwrap().0; + let next_cursor = &self.heap.peek().unwrap().0; // If the last row of current block is smaller than the next cursor, // we can drain the whole block. if cursor.last().le(&next_cursor.current()) { @@ -230,13 +129,13 @@ where output_indices.push((block_idx, cursor.advance())); } if !cursor.is_finished() { - heap.push(Reverse(cursor)); + self.heap.push(Reverse(cursor)); } } } } - // 3. Build final blocks from `output_indices`. + // 2. Build final blocks from `output_indices`. 
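
The heap drain in `on_finish` is a k-way merge: each buffered block contributes a cursor, the cursors sit in a `BinaryHeap` wrapped in `Reverse` so the smallest current row pops first, and a whole block is drained in one go when its last row is not larger than the next cursor. A compact standalone sketch of the same idea over plain sorted vectors, with (value, run, offset) tuples standing in for the real `Cursor`:

use std::cmp::Reverse;
use std::collections::BinaryHeap;

// K-way merge of already-sorted runs using a min-heap of (value, run, offset).
// The real transform keeps whole blocks and row cursors; this sketch merges i32s.
fn merge_sorted(runs: Vec<Vec<i32>>) -> Vec<i32> {
    let mut heap = BinaryHeap::new();
    for (run_idx, run) in runs.iter().enumerate() {
        if let Some(&first) = run.first() {
            heap.push(Reverse((first, run_idx, 0usize)));
        }
    }
    let mut out = Vec::with_capacity(runs.iter().map(Vec::len).sum());
    while let Some(Reverse((value, run_idx, offset))) = heap.pop() {
        out.push(value);
        // Advance the cursor of the run we just consumed from.
        if let Some(&next) = runs[run_idx].get(offset + 1) {
            heap.push(Reverse((next, run_idx, offset + 1)));
        }
    }
    out
}

fn main() {
    let merged = merge_sorted(vec![vec![1, 4, 7], vec![2, 5], vec![3, 6, 8]]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6, 7, 8]);
}
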
for i in 0..output_block_num { if unlikely(self.aborting.load(Ordering::Relaxed)) { return Err(ErrorCode::AbortedQuery( @@ -258,119 +157,121 @@ where merge_slices.push((*block_idx, *row_idx, 1)); } } - let block = DataBlock::take_by_slices_limit_from_blocks(&blocks, &merge_slices, None); + let block = + DataBlock::take_by_slices_limit_from_blocks(&self.buffer, &merge_slices, None); output_blocks.push(block); } Ok(output_blocks) } + + fn interrupt(&self) { + self.aborting.store(true, Ordering::Release); + } } -type SimpleDateCompactor = SortMergeCompactor, SimpleRowConverter>; -type SimpleDateSort = TransformCompact; +type MergeSortDateImpl = TransformSortMerge; +type MergeSortDate = TransformSortMergeBase; -type SimpleTimestampCompactor = - SortMergeCompactor, SimpleRowConverter>; -type SimpleTimestampSort = TransformCompact; +type MergeSortTimestampImpl = TransformSortMerge; +type MergeSortTimestamp = + TransformSortMergeBase; -type SimpleStringCompactor = - SortMergeCompactor, SimpleRowConverter>; -type SimpleStringSort = TransformCompact; +type MergeSortStringImpl = TransformSortMerge; +type MergeSortString = TransformSortMergeBase; -type CommonCompactor = SortMergeCompactor; -type CommonSort = TransformCompact; +type MergeSortCommonImpl = TransformSortMerge; +type MergeSortCommon = TransformSortMergeBase; pub fn try_create_transform_sort_merge( input: Arc, output: Arc, - output_schema: DataSchemaRef, + schema: DataSchemaRef, block_size: usize, sort_desc: Vec, order_col_generated: bool, output_order_col: bool, ) -> Result> { - if sort_desc.len() == 1 { - let sort_type = output_schema.field(sort_desc[0].offset).data_type(); + let processor = if sort_desc.len() == 1 { + let sort_type = schema.field(sort_desc[0].offset).data_type(); match sort_type { DataType::Number(num_ty) => with_number_mapped_type!(|NUM_TYPE| match num_ty { - NumberDataType::NUM_TYPE => TransformCompact::< - SortMergeCompactor< - SimpleRows>, - SimpleRowConverter>, - >, - >::try_create( + NumberDataType::NUM_TYPE => AccumulatingTransformer::create( input, output, - SortMergeCompactor::< + TransformSortMergeBase::< + TransformSortMerge>>, SimpleRows>, SimpleRowConverter>, >::try_create( - output_schema, - block_size, + schema, sort_desc, order_col_generated, - output_order_col - )? 
+ output_order_col, + TransformSortMerge::create(block_size), + )?, ), }), - DataType::Date => SimpleDateSort::try_create( + DataType::Date => AccumulatingTransformer::create( input, output, - SimpleDateCompactor::try_create( - output_schema, - block_size, + MergeSortDate::try_create( + schema, sort_desc, order_col_generated, output_order_col, + MergeSortDateImpl::create(block_size), )?, ), - DataType::Timestamp => SimpleTimestampSort::try_create( + DataType::Timestamp => AccumulatingTransformer::create( input, output, - SimpleTimestampCompactor::try_create( - output_schema, - block_size, + MergeSortTimestamp::try_create( + schema, sort_desc, order_col_generated, output_order_col, + MergeSortTimestampImpl::create(block_size), )?, ), - DataType::String => SimpleStringSort::try_create( + DataType::String => AccumulatingTransformer::create( input, output, - SimpleStringCompactor::try_create( - output_schema, - block_size, + MergeSortString::try_create( + schema, sort_desc, order_col_generated, output_order_col, + MergeSortStringImpl::create(block_size), )?, ), - _ => CommonSort::try_create( + _ => AccumulatingTransformer::create( input, output, - CommonCompactor::try_create( - output_schema, - block_size, + MergeSortCommon::try_create( + schema, sort_desc, order_col_generated, output_order_col, + MergeSortCommonImpl::create(block_size), )?, ), } } else { - CommonSort::try_create( + AccumulatingTransformer::create( input, output, - CommonCompactor::try_create( - output_schema, - block_size, + MergeSortCommon::try_create( + schema, sort_desc, order_col_generated, output_order_col, + MergeSortCommonImpl::create(block_size), )?, ) - } + }; + + Ok(processor) } pub fn sort_merge( @@ -379,7 +280,15 @@ pub fn sort_merge( sort_desc: Vec, data_blocks: Vec, ) -> Result> { - let mut compactor = - CommonCompactor::try_create(data_schema, block_size, sort_desc, false, false)?; - compactor.compact_final(data_blocks) + let mut processor = MergeSortCommon::try_create( + data_schema, + sort_desc, + false, + false, + MergeSortCommonImpl::create(block_size), + )?; + for block in data_blocks { + processor.transform(block)?; + } + processor.on_finish(true) } diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_base.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_base.rs new file mode 100644 index 000000000000..132c2c94ce4d --- /dev/null +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_base.rs @@ -0,0 +1,162 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::marker::PhantomData; + +use common_exception::ErrorCode; +use common_exception::Result; +use common_expression::BlockEntry; +use common_expression::DataBlock; +use common_expression::DataSchemaRef; +use common_expression::SortColumnDescription; +use common_expression::Value; + +use super::sort::Cursor; +use super::sort::RowConverter; +use super::sort::Rows; +use super::AccumulatingTransform; + +pub enum Status { + /// Continue to add blocks. + Continue, + // TODO(spill): transfer these blocks to spilling transform. + // Need to spill blocks. + // This status is not used currently. + // Spill, +} + +pub trait MergeSort { + const NAME: &'static str; + + /// Add a block to the merge sort processor. + /// `block` is the input data block. + /// `init_cursor` is the initial sorting cursor of this `block`. + fn add_block(&mut self, block: DataBlock, init_cursor: Cursor) -> Result; + + fn on_finish(&mut self) -> Result>; + + fn interrupt(&self) {} +} + +/// The base struct for merging sorted blocks from a single thread. +pub struct TransformSortMergeBase { + inner: M, + + row_converter: Converter, + sort_desc: Vec, + /// If the next transform of current transform is [`super::transform_multi_sort_merge::MultiSortMergeProcessor`], + /// we can generate and output the order column to avoid the extra converting in the next transform. + output_order_col: bool, + /// If this transform is after an Exchange transform, + /// it means it will compact the data from cluster nodes. + /// And the order column is already generated in each cluster node, + /// so we don't need to generate the order column again. + order_col_generated: bool, + + /// The index for the next input block. + next_index: usize, + + _r: PhantomData, +} + +impl TransformSortMergeBase +where + M: MergeSort, + R: Rows, + Converter: RowConverter, +{ + pub fn try_create( + schema: DataSchemaRef, + sort_desc: Vec, + order_col_generated: bool, + output_order_col: bool, + inner: M, + ) -> Result { + debug_assert!(if order_col_generated { + // If the order column is already generated, + // it means this transform is after a exchange source and it's the last transform for sorting. + // We should remove the order column. + !output_order_col + } else { + true + }); + + let row_converter = Converter::create(&sort_desc, schema)?; + + Ok(Self { + inner, + row_converter, + sort_desc, + output_order_col, + order_col_generated, + next_index: 0, + _r: PhantomData, + }) + } +} + +impl AccumulatingTransform for TransformSortMergeBase +where + M: MergeSort + Send + Sync, + R: Rows + Send + Sync, + Converter: RowConverter + Send + Sync, +{ + const NAME: &'static str = M::NAME; + + fn transform(&mut self, mut block: DataBlock) -> Result> { + let rows = if self.order_col_generated { + let order_col = block + .columns() + .last() + .unwrap() + .value + .as_column() + .unwrap() + .clone(); + let rows = R::from_column(order_col, &self.sort_desc) + .ok_or_else(|| ErrorCode::BadDataValueType("Order column type mismatched."))?; + // Need to remove order column. 
+ block.pop_columns(1); + rows + } else { + let order_by_cols = self + .sort_desc + .iter() + .map(|d| block.get_by_offset(d.offset).clone()) + .collect::>(); + let rows = self + .row_converter + .convert(&order_by_cols, block.num_rows())?; + if self.output_order_col { + let order_col = rows.to_column(); + block.add_column(BlockEntry { + data_type: order_col.data_type(), + value: Value::Column(order_col), + }); + } + rows + }; + + let cursor = Cursor::new(self.next_index, rows); + self.next_index += 1; + + match self.inner.add_block(block, cursor)? { + Status::Continue => Ok(vec![]), + } + } + + fn on_finish(&mut self, _output: bool) -> Result> { + self.inner.on_finish() + } +} diff --git a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs index 21af08fc568e..ac8e71c90f59 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/transform_sort_merge_limit.rs @@ -14,140 +14,58 @@ use std::cmp::Reverse; use std::collections::HashMap; +use std::intrinsics::unlikely; use std::sync::Arc; use common_base::containers::FixedHeap; -use common_exception::ErrorCode; use common_exception::Result; -use common_expression::row::RowConverter as CommonRowConverter; -use common_expression::types::string::StringColumn; +use common_expression::row::RowConverter as CommonConverter; use common_expression::types::DataType; -use common_expression::types::DateType; use common_expression::types::NumberDataType; use common_expression::types::NumberType; -use common_expression::types::StringType; -use common_expression::types::TimestampType; use common_expression::with_number_mapped_type; -use common_expression::BlockEntry; use common_expression::DataBlock; use common_expression::DataSchemaRef; use common_expression::SortColumnDescription; -use common_expression::Value; use common_pipeline_core::processors::InputPort; use common_pipeline_core::processors::OutputPort; use common_pipeline_core::processors::Processor; +use super::sort::CommonRows; use super::sort::Cursor; -use super::sort::RowConverter; +use super::sort::DateConverter; +use super::sort::DateRows; use super::sort::Rows; use super::sort::SimpleRowConverter; use super::sort::SimpleRows; -use super::AccumulatingTransform; +use super::sort::StringConverter; +use super::sort::StringRows; +use super::sort::TimestampConverter; +use super::sort::TimestampRows; +use super::transform_sort_merge_base::MergeSort; +use super::transform_sort_merge_base::Status; +use super::transform_sort_merge_base::TransformSortMergeBase; use super::AccumulatingTransformer; -/// This is a specific version of [`super::transform_sort_merge::SortMergeCompactor`] which sort blocks with limit. -/// -/// Definitions of some same fields can be found in [`super::transform_sort_merge::SortMergeCompactor`]. -pub struct TransformSortMergeLimit { - row_converter: Converter, - heap: FixedHeap>>>, - sort_desc: Vec, - +/// This is a specific version of [`super::transform_sort_merge::TransformSortMerge`] which sort blocks with limit. 
+pub struct TransformSortMergeLimit<R: Rows> {
+    heap: FixedHeap<Reverse<Cursor<R>>>,
     buffer: HashMap<usize, DataBlock>,
-    cur_index: usize,
     block_size: usize,
-
-    order_col_generated: bool,
-    output_order_col: bool,
 }
 
-impl<R, Converter> TransformSortMergeLimit<R, Converter>
-where
-    R: Rows,
-    Converter: RowConverter<R>,
-{
-    pub fn try_create(
-        schema: DataSchemaRef,
-        sort_desc: Vec<SortColumnDescription>,
-        block_size: usize,
-        limit: usize,
-        order_col_generated: bool,
-        output_order_col: bool,
-    ) -> Result<Self> {
-        debug_assert!(if order_col_generated {
-            !output_order_col
-        } else {
-            true
-        });
-
-        let row_converter = Converter::create(&sort_desc, schema)?;
-        Ok(TransformSortMergeLimit {
-            row_converter,
-            sort_desc,
-            heap: FixedHeap::new(limit),
-            buffer: HashMap::with_capacity(limit),
-            block_size,
-            cur_index: 0,
-            order_col_generated,
-            output_order_col,
-        })
-    }
-}
-
-impl<R, Converter> AccumulatingTransform for TransformSortMergeLimit<R, Converter>
-where
-    R: Rows + Send + Sync,
-    Converter: RowConverter<R> + Send + Sync,
-{
+impl<R: Rows> MergeSort<R> for TransformSortMergeLimit<R> {
     const NAME: &'static str = "TransformSortMergeLimit";
 
-    fn transform(&mut self, mut data: DataBlock) -> Result<Vec<DataBlock>> {
-        if self.heap.cap() == 0 {
-            // limit is 0
-            return Ok(vec![]);
+    fn add_block(&mut self, block: DataBlock, mut cursor: Cursor<R>) -> Result<Status> {
+        if unlikely(self.heap.cap() == 0 || block.is_empty()) {
+            // limit is 0 or block is empty.
+            return Ok(Status::Continue);
         }
 
-        if data.is_empty() {
-            return Ok(vec![]);
-        }
-
-        let rows = if self.order_col_generated {
-            let order_col = data
-                .columns()
-                .last()
-                .unwrap()
-                .value
-                .as_column()
-                .unwrap()
-                .clone();
-            let rows = R::from_column(order_col, &self.sort_desc)
-                .ok_or_else(|| ErrorCode::BadDataValueType("Order column type mismatched."))?;
-            // Need to remove order column.
-            data.pop_columns(1);
-            Arc::new(rows)
-        } else {
-            let order_by_cols = self
-                .sort_desc
-                .iter()
-                .map(|d| data.get_by_offset(d.offset).clone())
-                .collect::<Vec<_>>();
-            let rows = Arc::new(
-                self.row_converter
-                    .convert(&order_by_cols, data.num_rows())?,
-            );
-            if self.output_order_col {
-                let order_col = rows.to_column();
-                data.add_column(BlockEntry {
-                    data_type: order_col.data_type(),
-                    value: Value::Column(order_col),
-                });
-            }
-            rows
-        };
-
-        let mut cursor = Cursor::new(self.cur_index, rows);
-        self.buffer.insert(self.cur_index, data);
+        let cur_index = cursor.input_index;
+        self.buffer.insert(cur_index, block);
 
         while !cursor.is_finished() {
             if let Some(Reverse(evict)) = self.heap.push(Reverse(cursor.clone())) {
@@ -157,7 +75,7 @@ where
                     self.buffer.remove(&evict.input_index);
                 }
 
-                if evict.input_index == self.cur_index {
+                if evict.input_index == cur_index {
                     // The Top-N heap is full, and later rows in current block cannot be put into the heap.
                    break;
                 }
 
@@ -165,11 +83,10 @@ where
             cursor.advance();
         }
 
-        self.cur_index += 1;
-        Ok(vec![])
+        Ok(Status::Continue)
     }
 
-    fn on_finish(&mut self, _output: bool) -> Result<Vec<DataBlock>> {
+    fn on_finish(&mut self) -> Result<Vec<DataBlock>> {
         if self.heap.is_empty() {
             return Ok(vec![]);
         }
@@ -213,118 +130,118 @@ where
     }
 }
 
-type SimpleDateTransform =
-    TransformSortMergeLimit<SimpleRows<DateType>, SimpleRowConverter<DateType>>;
-type SimpleDateSort = AccumulatingTransformer<SimpleDateTransform>;
+impl<R: Rows> TransformSortMergeLimit<R> {
+    pub fn create(block_size: usize, limit: usize) -> Self {
+        TransformSortMergeLimit {
+            heap: FixedHeap::new(limit),
+            buffer: HashMap::with_capacity(limit),
+            block_size,
+        }
+    }
+}
+
+type MergeSortDateImpl = TransformSortMergeLimit<DateRows>;
+type MergeSortDate = TransformSortMergeBase<MergeSortDateImpl, DateRows, DateConverter>;
 
-type SimpleTimestampTransform =
-    TransformSortMergeLimit<SimpleRows<TimestampType>, SimpleRowConverter<TimestampType>>;
-type SimpleTimestampSort = AccumulatingTransformer<SimpleTimestampTransform>;
+type MergeSortTimestampImpl = TransformSortMergeLimit<TimestampRows>;
+type MergeSortTimestamp =
+    TransformSortMergeBase<MergeSortTimestampImpl, TimestampRows, TimestampConverter>;
 
-type SimpleStringTransform =
-    TransformSortMergeLimit<SimpleRows<StringType>, SimpleRowConverter<StringType>>;
-type SimpleStringSort = AccumulatingTransformer<SimpleStringTransform>;
+type MergeSortStringImpl = TransformSortMergeLimit<StringRows>;
+type MergeSortString = TransformSortMergeBase<MergeSortStringImpl, StringRows, StringConverter>;
 
-type CommonTransform = TransformSortMergeLimit<StringColumn, CommonRowConverter>;
-type CommonSort = AccumulatingTransformer<CommonTransform>;
+type MergeSortCommonImpl = TransformSortMergeLimit<CommonRows>;
+type MergeSortCommon = TransformSortMergeBase<MergeSortCommonImpl, CommonRows, CommonConverter>;
 
 #[allow(clippy::too_many_arguments)]
 pub fn try_create_transform_sort_merge_limit(
     input: Arc<InputPort>,
     output: Arc<OutputPort>,
-    input_schema: DataSchemaRef,
+    schema: DataSchemaRef,
     sort_desc: Vec<SortColumnDescription>,
     block_size: usize,
     limit: usize,
     order_col_generated: bool,
     output_order_col: bool,
 ) -> Result<Box<dyn Processor>> {
-    Ok(if sort_desc.len() == 1 {
-        let sort_type = input_schema.field(sort_desc[0].offset).data_type();
+    let processor = if sort_desc.len() == 1 {
+        let sort_type = schema.field(sort_desc[0].offset).data_type();
         match sort_type {
             DataType::Number(num_ty) => with_number_mapped_type!(|NUM_TYPE| match num_ty {
-                NumberDataType::NUM_TYPE => AccumulatingTransformer::<
-                    TransformSortMergeLimit<
-                        SimpleRows<NumberType<NUM_TYPE>>,
-                        SimpleRowConverter<NumberType<NUM_TYPE>>,
-                    >,
-                >::create(
+                NumberDataType::NUM_TYPE => AccumulatingTransformer::create(
                     input,
                     output,
-                    TransformSortMergeLimit::<
+                    TransformSortMergeBase::<
+                        TransformSortMergeLimit<SimpleRows<NumberType<NUM_TYPE>>>,
                         SimpleRows<NumberType<NUM_TYPE>>,
                         SimpleRowConverter<NumberType<NUM_TYPE>>,
                     >::try_create(
-                        input_schema,
+                        schema,
                         sort_desc,
-                        block_size,
-                        limit,
                         order_col_generated,
-                        output_order_col
-                    )?
+                        output_order_col,
+                        TransformSortMergeLimit::create(block_size, limit),
+                    )?,
                 ),
             }),
-            DataType::Date => SimpleDateSort::create(
+            DataType::Date => AccumulatingTransformer::create(
                 input,
                 output,
-                SimpleDateTransform::try_create(
-                    input_schema,
+                MergeSortDate::try_create(
+                    schema,
                     sort_desc,
-                    block_size,
-                    limit,
                     order_col_generated,
                     output_order_col,
+                    MergeSortDateImpl::create(block_size, limit),
                 )?,
             ),
-            DataType::Timestamp => SimpleTimestampSort::create(
+            DataType::Timestamp => AccumulatingTransformer::create(
                 input,
                 output,
-                SimpleTimestampTransform::try_create(
-                    input_schema,
+                MergeSortTimestamp::try_create(
+                    schema,
                     sort_desc,
-                    block_size,
-                    limit,
                     order_col_generated,
                     output_order_col,
+                    MergeSortTimestampImpl::create(block_size, limit),
                 )?,
             ),
-            DataType::String => SimpleStringSort::create(
+            DataType::String => AccumulatingTransformer::create(
                 input,
                 output,
-                SimpleStringTransform::try_create(
-                    input_schema,
+                MergeSortString::try_create(
+                    schema,
                     sort_desc,
-                    block_size,
-                    limit,
                     order_col_generated,
                     output_order_col,
+                    MergeSortStringImpl::create(block_size, limit),
                 )?,
            ),
-            _ => CommonSort::create(
+            _ => AccumulatingTransformer::create(
                 input,
                 output,
-                CommonTransform::try_create(
-                    input_schema,
+                MergeSortCommon::try_create(
+                    schema,
                     sort_desc,
-                    block_size,
-                    limit,
                     order_col_generated,
                     output_order_col,
+                    MergeSortCommonImpl::create(block_size, limit),
                 )?,
             ),
         }
     } else {
-        CommonSort::create(
+        AccumulatingTransformer::create(
             input,
             output,
-            CommonTransform::try_create(
-                input_schema,
+            MergeSortCommon::try_create(
+                schema,
                 sort_desc,
-                block_size,
-                limit,
                 order_col_generated,
                 output_order_col,
+                MergeSortCommonImpl::create(block_size, limit),
             )?,
         )
-    })
+    };
+
+    Ok(processor)
 }
diff --git a/tests/sqllogictests/suites/mode/standalone/explain/sort.test b/tests/sqllogictests/suites/mode/standalone/explain/sort.test
index 3ccde3aa10ca..1076a7fe9057 100644
--- a/tests/sqllogictests/suites/mode/standalone/explain/sort.test
+++ b/tests/sqllogictests/suites/mode/standalone/explain/sort.test
@@ -73,8 +73,8 @@ query T
 explain pipeline select a, b from t1 order by a;
 ----
 CompoundBlockOperator(Project) × 1 processor
-  Merge (SortMergeTransform × 4 processors) to (CompoundBlockOperator(Project) × 1)
-    SortMergeTransform × 4 processors
+  Merge (TransformSortMerge × 4 processors) to (CompoundBlockOperator(Project) × 1)
+    TransformSortMerge × 4 processors
       SortPartialTransform × 4 processors
         Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4)
           DeserializeDataTransform × 1 processor
@@ -86,8 +86,8 @@ query T
 explain pipeline select a + 1, b from t1 order by a + 1;
 ----
 CompoundBlockOperator(Project) × 1 processor
-  Merge (SortMergeTransform × 4 processors) to (CompoundBlockOperator(Project) × 1)
-    SortMergeTransform × 4 processors
+  Merge (TransformSortMerge × 4 processors) to (CompoundBlockOperator(Project) × 1)
+    TransformSortMerge × 4 processors
       SortPartialTransform × 4 processors
         Merge (CompoundBlockOperator(Map) × 1 processor) to (SortPartialTransform × 4)
          CompoundBlockOperator(Map) × 1 processor
diff --git a/tests/sqllogictests/suites/mode/standalone/explain/window.test b/tests/sqllogictests/suites/mode/standalone/explain/window.test
index 14770c0dec51..5d94afb9db62 100644
--- a/tests/sqllogictests/suites/mode/standalone/explain/window.test
+++ b/tests/sqllogictests/suites/mode/standalone/explain/window.test
@@ -40,13 +40,13 @@ query
 explain pipeline SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname ORDER BY empno) FROM empsalary ORDER BY depname, empno;
 ----
 CompoundBlockOperator(Project) × 1 processor
-  Merge (SortMergeTransform × 4 processors) to (CompoundBlockOperator(Project) × 1)
-    SortMergeTransform × 4 processors
+  Merge (TransformSortMerge × 4 processors) to (CompoundBlockOperator(Project) × 1)
+    TransformSortMerge × 4 processors
       SortPartialTransform × 4 processors
         Merge (Transform Window × 1 processor) to (SortPartialTransform × 4)
           Transform Window × 1 processor
-            Merge (SortMergeTransform × 4 processors) to (Transform Window × 1)
-              SortMergeTransform × 4 processors
+            Merge (TransformSortMerge × 4 processors) to (Transform Window × 1)
+              TransformSortMerge × 4 processors
                 SortPartialTransform × 4 processors
                   Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4)
                     DeserializeDataTransform × 1 processor