From f7504edb590b350be4317b79de58b10cf1276b35 Mon Sep 17 00:00:00 2001 From: zhyass Date: Tue, 26 Sep 2023 11:10:38 +0800 Subject: [PATCH 1/9] feat: tweak table data life-cycle related sql stmts --- src/query/ast/src/ast/format/syntax/ddl.rs | 8 +--- src/query/ast/src/ast/statements/table.rs | 11 +----- src/query/ast/src/parser/statement.rs | 9 ++--- src/query/ast/src/parser/token.rs | 2 - src/query/ast/tests/it/testdata/statement.txt | 22 ----------- src/query/catalog/src/table.rs | 4 +- .../api/rpc/packets/packet_truncate_table.rs | 3 -- .../interpreters/interpreter_table_create.rs | 2 - .../interpreters/interpreter_table_drop.rs | 10 ----- .../interpreter_table_show_create.rs | 3 -- .../interpreter_table_truncate.rs | 6 +-- .../it/storages/fuse/operations/analyze.rs | 2 +- .../it/storages/fuse/operations/truncate.rs | 8 ++-- src/query/service/tests/it/storages/null.rs | 3 +- src/query/sql/src/planner/binder/ddl/table.rs | 8 ---- src/query/sql/src/planner/plans/ddl/table.rs | 1 - src/query/storages/fuse/src/fuse_table.rs | 8 +--- .../storages/fuse/src/operations/commit.rs | 7 ---- .../common/processors/sink_commit.rs | 37 +------------------ .../storages/fuse/src/operations/delete.rs | 6 +-- .../storages/fuse/src/operations/truncate.rs | 19 +--------- .../storages/hive/hive/src/hive_table.rs | 2 +- src/query/storages/memory/src/memory_table.rs | 2 +- src/query/storages/stage/src/stage_table.rs | 2 +- src/query/storages/system/src/log_queue.rs | 2 +- src/query/storages/system/src/table.rs | 2 +- src/query/storages/system/src/tables_table.rs | 12 ------ src/tests/sqlsmith/src/sql_gen/ddl.rs | 1 - 28 files changed, 24 insertions(+), 178 deletions(-) diff --git a/src/query/ast/src/ast/format/syntax/ddl.rs b/src/query/ast/src/ast/format/syntax/ddl.rs index c1eda13639bc..3bcb8a92d69f 100644 --- a/src/query/ast/src/ast/format/syntax/ddl.rs +++ b/src/query/ast/src/ast/format/syntax/ddl.rs @@ -30,13 +30,7 @@ use crate::ast::CreateViewStmt; use crate::ast::TimeTravelPoint; 
pub(crate) fn pretty_create_table(stmt: CreateTableStmt) -> RcDoc<'static> { - RcDoc::text("CREATE") - .append(if stmt.transient { - RcDoc::space().append(RcDoc::text("TRANSIENT")) - } else { - RcDoc::nil() - }) - .append(RcDoc::space().append(RcDoc::text("TABLE"))) + RcDoc::text("CREATE TABLE") .append(if stmt.if_not_exists { RcDoc::space().append(RcDoc::text("IF NOT EXISTS")) } else { diff --git a/src/query/ast/src/ast/statements/table.rs b/src/query/ast/src/ast/statements/table.rs index d9a9e003915a..11e9c777f212 100644 --- a/src/query/ast/src/ast/statements/table.rs +++ b/src/query/ast/src/ast/statements/table.rs @@ -131,16 +131,11 @@ pub struct CreateTableStmt { pub cluster_by: Vec, pub table_options: BTreeMap, pub as_query: Option>, - pub transient: bool, } impl Display for CreateTableStmt { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "CREATE ")?; - if self.transient { - write!(f, "TRANSIENT ")?; - } - write!(f, "TABLE ")?; + write!(f, "CREATE TABLE ")?; if self.if_not_exists { write!(f, "IF NOT EXISTS ")?; } @@ -469,7 +464,6 @@ pub struct TruncateTableStmt { pub catalog: Option, pub database: Option, pub table: Identifier, - pub purge: bool, } impl Display for TruncateTableStmt { @@ -482,9 +476,6 @@ impl Display for TruncateTableStmt { .chain(&self.database) .chain(Some(&self.table)), )?; - if self.purge { - write!(f, " PURGE")?; - } Ok(()) } diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index e505f86ddc54..e9e7b2e2493c 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -536,7 +536,7 @@ pub fn statement(i: Input) -> IResult { ); let create_table = map( rule! { - CREATE ~ TRANSIENT? ~ TABLE ~ ( IF ~ ^NOT ~ ^EXISTS )? + CREATE ~ TABLE ~ ( IF ~ ^NOT ~ ^EXISTS )? ~ #dot_separated_idents_1_to_3 ~ #create_table_source? ~ ( #engine )? 
@@ -547,7 +547,6 @@ pub fn statement(i: Input) -> IResult { }, |( _, - opt_transient, _, opt_if_not_exists, (catalog, database, table), @@ -571,7 +570,6 @@ pub fn statement(i: Input) -> IResult { .unwrap_or_default(), table_options: opt_table_options.unwrap_or_default(), as_query: opt_as_query.map(|(_, query)| Box::new(query)), - transient: opt_transient.is_some(), }) }, ); @@ -638,14 +636,13 @@ pub fn statement(i: Input) -> IResult { ); let truncate_table = map( rule! { - TRUNCATE ~ TABLE ~ #dot_separated_idents_1_to_3 ~ PURGE? + TRUNCATE ~ TABLE ~ #dot_separated_idents_1_to_3 }, - |(_, _, (catalog, database, table), opt_purge)| { + |(_, _, (catalog, database, table))| { Statement::TruncateTable(TruncateTableStmt { catalog, database, table, - purge: opt_purge.is_some(), }) }, ); diff --git a/src/query/ast/src/parser/token.rs b/src/query/ast/src/parser/token.rs index 723188676413..5833b4f6696b 100644 --- a/src/query/ast/src/parser/token.rs +++ b/src/query/ast/src/parser/token.rs @@ -900,8 +900,6 @@ pub enum TokenKind { TOKEN, #[token("TRAILING", ignore(ascii_case))] TRAILING, - #[token("TRANSIENT", ignore(ascii_case))] - TRANSIENT, #[token("TRIM", ignore(ascii_case))] TRIM, #[token("TRUE", ignore(ascii_case))] diff --git a/src/query/ast/tests/it/testdata/statement.txt b/src/query/ast/tests/it/testdata/statement.txt index e76d0b1d855a..5f8245313b34 100644 --- a/src/query/ast/tests/it/testdata/statement.txt +++ b/src/query/ast/tests/it/testdata/statement.txt @@ -592,7 +592,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -641,7 +640,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -721,7 +719,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -857,7 +854,6 @@ CreateTable( ignore_result: false, }, ), - transient: false, }, ) @@ -941,7 +937,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, 
}, ) @@ -1082,7 +1077,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1203,7 +1197,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1259,7 +1252,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1301,7 +1293,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1370,7 +1361,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1441,7 +1431,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -1462,7 +1451,6 @@ TruncateTable( 15..16, ), }, - purge: false, }, ) @@ -1493,7 +1481,6 @@ TruncateTable( 19..20, ), }, - purge: false, }, ) @@ -1801,7 +1788,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -2359,7 +2345,6 @@ TruncateTable( 15..19, ), }, - purge: false, }, ) @@ -2388,7 +2373,6 @@ TruncateTable( 23..27, ), }, - purge: false, }, ) @@ -2510,7 +2494,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -2588,7 +2571,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -2634,7 +2616,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -2691,7 +2672,6 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, - transient: false, }, ) @@ -2784,7 +2764,6 @@ CreateTable( ignore_result: false, }, ), - transient: false, }, ) @@ -8467,7 +8446,6 @@ CreateTable( "comment": "table comment", }, as_query: None, - transient: false, }, ) diff --git a/src/query/catalog/src/table.rs b/src/query/catalog/src/table.rs index e78036bcc467..7e416d199d94 100644 --- a/src/query/catalog/src/table.rs +++ b/src/query/catalog/src/table.rs @@ -214,8 +214,8 @@ pub trait Table: Sync + Send { } #[async_backtrace::framed] - async fn 
truncate(&self, ctx: Arc, purge: bool) -> Result<()> { - let (_, _) = (ctx, purge); + async fn truncate(&self, ctx: Arc) -> Result<()> { + let _ = ctx; Ok(()) } diff --git a/src/query/service/src/api/rpc/packets/packet_truncate_table.rs b/src/query/service/src/api/rpc/packets/packet_truncate_table.rs index cc6ddae0c08b..75670a86214c 100644 --- a/src/query/service/src/api/rpc/packets/packet_truncate_table.rs +++ b/src/query/service/src/api/rpc/packets/packet_truncate_table.rs @@ -25,7 +25,6 @@ use crate::api::FlightAction; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct TruncateTablePacket { - pub purge: bool, pub table_name: String, pub catalog_name: String, pub database_name: String, @@ -38,10 +37,8 @@ impl TruncateTablePacket { table_name: String, catalog_name: String, database_name: String, - purge: bool, ) -> TruncateTablePacket { TruncateTablePacket { - purge, table_name, catalog_name, database_name, diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index f574ef453e3f..d8d6572fd150 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -374,8 +374,6 @@ pub static CREATE_TABLE_OPTIONS: Lazy> = Lazy::new(|| { r.insert(OPT_KEY_COMMENT); r.insert(OPT_KEY_ENGINE); - - r.insert("transient"); r }); diff --git a/src/query/service/src/interpreters/interpreter_table_drop.rs b/src/query/service/src/interpreters/interpreter_table_drop.rs index ba9a59b6f7ae..362695d296e3 100644 --- a/src/query/service/src/interpreters/interpreter_table_drop.rs +++ b/src/query/service/src/interpreters/interpreter_table_drop.rs @@ -14,7 +14,6 @@ use std::sync::Arc; -use common_catalog::table::TableExt; use common_exception::ErrorCode; use common_exception::Result; use common_meta_app::schema::DropTableByIdReq; @@ -78,15 +77,6 @@ impl Interpreter for DropTableInterpreter { }) .await?; - 
// if `plan.all`, truncate, then purge the historical data - if self.plan.all { - let purge = true; - // the above `catalog.drop_table` operation changed the table meta version, - // thus if we do not refresh the table instance, `truncate` will fail - let latest = tbl.as_ref().refresh(self.ctx.as_ref()).await?; - latest.truncate(self.ctx.clone(), purge).await? - } - if let Some((spec_vec, share_table_info)) = resp.spec_vec { save_share_spec( &self.ctx.get_tenant(), diff --git a/src/query/service/src/interpreters/interpreter_table_show_create.rs b/src/query/service/src/interpreters/interpreter_table_show_create.rs index 151d20fcd45b..bb6617be2cba 100644 --- a/src/query/service/src/interpreters/interpreter_table_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_show_create.rs @@ -94,9 +94,6 @@ impl Interpreter for ShowCreateTableInterpreter { let n_fields = schema.fields().len(); let mut table_create_sql = format!("CREATE TABLE `{}` (\n", name); - if table.options().contains_key("TRANSIENT") { - table_create_sql = format!("CREATE TRANSIENT TABLE `{}` (\n", name) - } // Append columns. 
{ diff --git a/src/query/service/src/interpreters/interpreter_table_truncate.rs b/src/query/service/src/interpreters/interpreter_table_truncate.rs index a75f1a976b92..2b2eb0836644 100644 --- a/src/query/service/src/interpreters/interpreter_table_truncate.rs +++ b/src/query/service/src/interpreters/interpreter_table_truncate.rs @@ -27,7 +27,6 @@ use crate::sessions::TableContext; pub struct TruncateTableInterpreter { ctx: Arc, - purge: bool, table_name: String, catalog_name: String, database_name: String, @@ -39,7 +38,6 @@ impl TruncateTableInterpreter { pub fn try_create(ctx: Arc, plan: TruncateTablePlan) -> Result { Ok(TruncateTableInterpreter { ctx, - purge: plan.purge, table_name: plan.table, catalog_name: plan.catalog, database_name: plan.database, @@ -50,7 +48,6 @@ impl TruncateTableInterpreter { pub fn from_flight(ctx: Arc, packet: TruncateTablePacket) -> Result { Ok(TruncateTableInterpreter { ctx, - purge: packet.purge, table_name: packet.table_name, catalog_name: packet.catalog_name, database_name: packet.database_name, @@ -84,14 +81,13 @@ impl Interpreter for TruncateTableInterpreter { self.table_name.clone(), self.catalog_name.clone(), self.database_name.clone(), - self.purge, ); truncate_packet.commit(conf.as_ref(), timeout).await?; } } } - table.truncate(self.ctx.clone(), self.purge).await?; + table.truncate(self.ctx.clone()).await?; Ok(PipelineBuildResult::create()) } } diff --git a/src/query/service/tests/it/storages/fuse/operations/analyze.rs b/src/query/service/tests/it/storages/fuse/operations/analyze.rs index bc9498c5ca5a..46eb0cc9c44d 100644 --- a/src/query/service/tests/it/storages/fuse/operations/analyze.rs +++ b/src/query/service/tests/it/storages/fuse/operations/analyze.rs @@ -77,7 +77,7 @@ async fn test_fuse_snapshot_analyze_and_truncate() -> Result<()> { .get_table(ctx.get_tenant().as_str(), &db, &tbl) .await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; - fuse_table.truncate(ctx, false).await?; + 
fuse_table.truncate(ctx).await?; } // optimize after truncate table, ts file location will become None diff --git a/src/query/service/tests/it/storages/fuse/operations/truncate.rs b/src/query/service/tests/it/storages/fuse/operations/truncate.rs index eae28a6e3538..967ae9dd6a5f 100644 --- a/src/query/service/tests/it/storages/fuse/operations/truncate.rs +++ b/src/query/service/tests/it/storages/fuse/operations/truncate.rs @@ -34,7 +34,7 @@ async fn test_fuse_table_truncate() -> common_exception::Result<()> { // 1. truncate empty table let prev_version = table.get_table_info().ident.seq; - let r = table.truncate(ctx.clone(), false).await; + let r = table.truncate(ctx.clone()).await; let table = fixture.latest_default_table().await?; // no side effects assert_eq!(prev_version, table.get_table_info().ident.seq); @@ -66,8 +66,7 @@ async fn test_fuse_table_truncate() -> common_exception::Result<()> { assert_eq!(stats.read_rows, (num_blocks * rows_per_block)); // truncate - let purge = false; - let r = table.truncate(ctx.clone(), purge).await; + let r = table.truncate(ctx.clone()).await; assert!(r.is_ok()); // get the latest tbl @@ -147,8 +146,7 @@ async fn test_fuse_table_truncate_appending_concurrently() -> common_exception:: let s2_table_to_appended = fixture.latest_default_table().await?; // 4. perform `truncate purge` operation on s1 - let purge = true; - let r = s1_table_to_be_truncated.truncate(ctx.clone(), purge).await; + let r = s1_table_to_be_truncated.truncate(ctx.clone()).await; // version mismatched, and `truncate purge` should result in error (but nothing should have been removed) assert!(r.is_err()); diff --git a/src/query/service/tests/it/storages/null.rs b/src/query/service/tests/it/storages/null.rs index 2876aceec182..fe5494af29f4 100644 --- a/src/query/service/tests/it/storages/null.rs +++ b/src/query/service/tests/it/storages/null.rs @@ -62,8 +62,7 @@ async fn test_null_table() -> Result<()> { // truncate. 
{ - let purge = false; - table.truncate(ctx, purge).await?; + table.truncate(ctx).await?; } Ok(()) diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index df82edbccc8f..89525aa02b4b 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -387,7 +387,6 @@ impl Binder { table_options, cluster_by, as_query, - transient, engine, uri_location, } = stmt; @@ -432,11 +431,6 @@ impl Binder { None => (None, "".to_string()), }; - // If table is TRANSIENT, set a flag in table option - if *transient { - options.insert("TRANSIENT".to_owned(), "T".to_owned()); - } - // Build table schema let (schema, field_comments) = match (&source, &as_query) { (Some(source), None) => { @@ -971,7 +965,6 @@ impl Binder { catalog, database, table, - purge, } = stmt; let (catalog, database, table) = @@ -981,7 +974,6 @@ impl Binder { catalog, database, table, - purge: *purge, }))) } diff --git a/src/query/sql/src/planner/plans/ddl/table.rs b/src/query/sql/src/planner/plans/ddl/table.rs index c447a07bb680..ea86d7575ac5 100644 --- a/src/query/sql/src/planner/plans/ddl/table.rs +++ b/src/query/sql/src/planner/plans/ddl/table.rs @@ -324,7 +324,6 @@ pub struct TruncateTablePlan { pub database: String, /// The table name pub table: String, - pub purge: bool, } impl TruncateTablePlan { diff --git a/src/query/storages/fuse/src/fuse_table.rs b/src/query/storages/fuse/src/fuse_table.rs index a0ccda0c0f87..2410c479f309 100644 --- a/src/query/storages/fuse/src/fuse_table.rs +++ b/src/query/storages/fuse/src/fuse_table.rs @@ -338,10 +338,6 @@ impl FuseTable { }) } - pub fn transient(&self) -> bool { - self.table_info.meta.options.contains_key("TRANSIENT") - } - pub fn cluster_key_str(&self) -> Option<&String> { self.cluster_key_meta.as_ref().map(|(_, key)| key) } @@ -553,8 +549,8 @@ impl Table for FuseTable { #[minitrace::trace(name = "fuse_table_truncate")] #[async_backtrace::framed] - async fn 
truncate(&self, ctx: Arc, purge: bool) -> Result<()> { - self.do_truncate(ctx, purge).await + async fn truncate(&self, ctx: Arc) -> Result<()> { + self.do_truncate(ctx).await } #[minitrace::trace(name = "fuse_table_optimize")] diff --git a/src/query/storages/fuse/src/operations/commit.rs b/src/query/storages/fuse/src/operations/commit.rs index 356902b52ea7..78b32ab80f9f 100644 --- a/src/query/storages/fuse/src/operations/commit.rs +++ b/src/query/storages/fuse/src/operations/commit.rs @@ -451,13 +451,6 @@ impl FuseTable { } } - #[inline] - pub fn is_error_recoverable(e: &ErrorCode, is_table_transient: bool) -> bool { - let code = e.code(); - code == ErrorCode::TABLE_VERSION_MISMATCHED - || (is_table_transient && code == ErrorCode::STORAGE_NOT_FOUND) - } - #[inline] pub fn no_side_effects_in_meta_store(e: &ErrorCode) -> bool { // currently, the only error that we know, which indicates there are no side effects diff --git a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs index c2c36deeafb4..3ac5406148f2 100644 --- a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs +++ b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs @@ -29,8 +29,6 @@ use common_meta_app::schema::TableInfo; use common_meta_app::schema::UpsertTableCopiedFileReq; use log::debug; use log::error; -use log::info; -use log::warn; use opendal::Operator; use storages_common_table_meta::meta::ClusterKey; use storages_common_table_meta::meta::SnapshotId; @@ -85,7 +83,6 @@ pub struct CommitSink { table: Arc, copied_files: Option, snapshot_gen: F, - transient: bool, retries: u64, max_retry_elapsed: Option, backoff: ExponentialBackoff, @@ -121,7 +118,6 @@ where F: SnapshotGenerator + Send + 'static snapshot_gen, abort_operation: AbortOperation::default(), heartbeat: TableLockHeartbeat::default(), - transient: table.transient(), backoff: ExponentialBackoff::default(), 
retries: 0, max_retry_elapsed, @@ -138,7 +134,7 @@ where F: SnapshotGenerator + Send + 'static if self.prev_snapshot_id.is_some() && e.code() == ErrorCode::TABLE_VERSION_MISMATCHED { return false; } - FuseTable::is_error_recoverable(e, self.transient) + FuseTable::no_side_effects_in_meta_store(e) } fn read_meta(&mut self) -> Result { @@ -325,37 +321,6 @@ where F: SnapshotGenerator + Send + 'static .await { Ok(_) => { - if self.transient { - // Removes historical data, if table is transient - let latest = self.table.refresh(self.ctx.as_ref()).await?; - let tbl = FuseTable::try_from_table(latest.as_ref())?; - - warn!( - "transient table detected, purging historical data. ({})", - tbl.table_info.ident - ); - - let keep_last_snapshot = true; - let snapshot_files = tbl.list_snapshot_files().await?; - if let Err(e) = tbl - .do_purge( - &self.ctx, - snapshot_files, - None, - keep_last_snapshot, - false, - ) - .await - { - // Errors of GC, if any, are ignored, since GC task can be picked up - warn!( - "GC of transient table not success (this is not a permanent error). the error : {}", - e - ); - } else { - info!("GC of transient table done"); - } - } metrics_inc_commit_mutation_success(); let duration = self.start_time.elapsed(); if let Some(files) = &self.copied_files { diff --git a/src/query/storages/fuse/src/operations/delete.rs b/src/query/storages/fuse/src/operations/delete.rs index e08ef24f1dce..d92faa19b428 100644 --- a/src/query/storages/fuse/src/operations/delete.rs +++ b/src/query/storages/fuse/src/operations/delete.rs @@ -99,8 +99,7 @@ impl FuseTable { }; ctx.get_write_progress().incr(&progress_values); // deleting the whole table... just a truncate - let purge = false; - return self.do_truncate(ctx.clone(), purge).await.map(|_| None); + return self.do_truncate(ctx.clone()).await.map(|_| None); } Some(filters) => filters, }; @@ -122,8 +121,7 @@ impl FuseTable { ctx.get_write_progress().incr(&progress_values); // deleting the whole table... 
just a truncate - let purge = false; - return self.do_truncate(ctx.clone(), purge).await.map(|_| None); + return self.do_truncate(ctx.clone()).await.map(|_| None); } } Ok(Some(snapshot.clone())) diff --git a/src/query/storages/fuse/src/operations/truncate.rs b/src/query/storages/fuse/src/operations/truncate.rs index 2685d8c6093f..540b714df832 100644 --- a/src/query/storages/fuse/src/operations/truncate.rs +++ b/src/query/storages/fuse/src/operations/truncate.rs @@ -30,7 +30,7 @@ use crate::FuseTable; impl FuseTable { #[inline] #[async_backtrace::framed] - pub async fn do_truncate(&self, ctx: Arc, purge: bool) -> Result<()> { + pub async fn do_truncate(&self, ctx: Arc) -> Result<()> { if let Some(prev_snapshot) = self.read_table_snapshot().await? { // 1. prepare new snapshot let prev_id = prev_snapshot.snapshot_id; @@ -97,23 +97,6 @@ impl FuseTable { new_snapshot_loc, ) .await; - - // best effort to remove historical data. if failed, let `vacuum` to do the job. - // TODO: consider remove the `purge` option from `truncate` - // - it is not a safe operation, there is NO retention interval protection here - // - it is incompatible with time travel features - if purge { - let snapshot_files = self.list_snapshot_files().await?; - let keep_last_snapshot = false; - let ret = self - .do_purge(&ctx, snapshot_files, None, keep_last_snapshot, false) - .await; - if let Err(e) = ret { - return Err(e); - } else { - return Ok(()); - } - } } Ok(()) diff --git a/src/query/storages/hive/hive/src/hive_table.rs b/src/query/storages/hive/hive/src/hive_table.rs index 001cd227a3ce..bcb31d05f0cf 100644 --- a/src/query/storages/hive/hive/src/hive_table.rs +++ b/src/query/storages/hive/hive/src/hive_table.rs @@ -602,7 +602,7 @@ impl Table for HiveTable { } #[async_backtrace::framed] - async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { + async fn truncate(&self, _ctx: Arc) -> Result<()> { Err(ErrorCode::Unimplemented(format!( "truncate for table {} is not implemented", self.name() 
diff --git a/src/query/storages/memory/src/memory_table.rs b/src/query/storages/memory/src/memory_table.rs index 43e1ef1fc7e8..8605eb97e35f 100644 --- a/src/query/storages/memory/src/memory_table.rs +++ b/src/query/storages/memory/src/memory_table.rs @@ -262,7 +262,7 @@ impl Table for MemoryTable { } #[async_backtrace::framed] - async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { + async fn truncate(&self, _ctx: Arc) -> Result<()> { let mut blocks = self.blocks.write(); blocks.clear(); Ok(()) diff --git a/src/query/storages/stage/src/stage_table.rs b/src/query/storages/stage/src/stage_table.rs index 3885b84d4bbe..3eb8d8e4baa1 100644 --- a/src/query/storages/stage/src/stage_table.rs +++ b/src/query/storages/stage/src/stage_table.rs @@ -274,7 +274,7 @@ impl Table for StageTable { // Truncate the stage file. #[async_backtrace::framed] - async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { + async fn truncate(&self, _ctx: Arc) -> Result<()> { Err(ErrorCode::Unimplemented( "S3 external table truncate() unimplemented yet!", )) diff --git a/src/query/storages/system/src/log_queue.rs b/src/query/storages/system/src/log_queue.rs index 748623ca3cf2..9a4dc18550c7 100644 --- a/src/query/storages/system/src/log_queue.rs +++ b/src/query/storages/system/src/log_queue.rs @@ -215,7 +215,7 @@ impl Table for SystemLogTable { } #[async_backtrace::framed] - async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { + async fn truncate(&self, _ctx: Arc) -> Result<()> { let log_queue = SystemLogQueue::::instance()?; let mut write_guard = log_queue.data.write(); diff --git a/src/query/storages/system/src/table.rs b/src/query/storages/system/src/table.rs index 8d01a1c0f3d6..55dbfb8648e5 100644 --- a/src/query/storages/system/src/table.rs +++ b/src/query/storages/system/src/table.rs @@ -149,7 +149,7 @@ impl Table for SyncOneBlockSystemTable, _purge: bool) -> Result<()> { + async fn truncate(&self, ctx: Arc) -> Result<()> { self.inner_table.truncate(ctx) } diff --git 
a/src/query/storages/system/src/tables_table.rs b/src/query/storages/system/src/tables_table.rs index 51849ac19493..0e1cdec4d325 100644 --- a/src/query/storages/system/src/tables_table.rs +++ b/src/query/storages/system/src/tables_table.rs @@ -263,16 +263,6 @@ where TablesTable: HistoryAware }) .collect(); let cluster_bys: Vec> = cluster_bys.iter().map(|s| s.as_bytes().to_vec()).collect(); - let is_transient: Vec> = database_tables - .iter() - .map(|v| { - if v.options().contains_key("TRANSIENT") { - "TRANSIENT".as_bytes().to_vec() - } else { - vec![] - } - }) - .collect(); Ok(DataBlock::new_from_columns(vec![ StringType::from_data(catalogs), StringType::from_data(databases), @@ -281,7 +271,6 @@ where TablesTable: HistoryAware StringType::from_data(engines), StringType::from_data(engines_full), StringType::from_data(cluster_bys), - StringType::from_data(is_transient), StringType::from_data(created_owns), StringType::from_data(dropped_owns), TimestampType::from_data(updated_on), @@ -308,7 +297,6 @@ where TablesTable: HistoryAware TableField::new("engine", TableDataType::String), TableField::new("engine_full", TableDataType::String), TableField::new("cluster_by", TableDataType::String), - TableField::new("is_transient", TableDataType::String), TableField::new("created_on", TableDataType::String), TableField::new("dropped_on", TableDataType::String), TableField::new("updated_on", TableDataType::Timestamp), diff --git a/src/tests/sqlsmith/src/sql_gen/ddl.rs b/src/tests/sqlsmith/src/sql_gen/ddl.rs index ef981e2b8463..2e45b07787c2 100644 --- a/src/tests/sqlsmith/src/sql_gen/ddl.rs +++ b/src/tests/sqlsmith/src/sql_gen/ddl.rs @@ -79,7 +79,6 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { cluster_by: vec![], table_options: BTreeMap::new(), as_query: None, - transient: false, }; tables.push((drop_table, create_table)); } From 3b84033d33fea1ef07798a9381f9d317faae3442 Mon Sep 17 00:00:00 2001 From: zhyass Date: Tue, 26 Sep 2023 18:13:09 +0800 Subject: [PATCH 2/9] fix test --- 
benchmark/clickbench/hits/create.sql | 2 +- benchmark/clickbench/hits/create_local.sql | 2 +- .../00-ddl/20-table/40-ddl-truncate-table.md | 10 +-- .../00-ddl/20-table/70-flashback-table.md | 2 +- scripts/benchmark/query/load/hits.sh | 2 +- src/query/ast/src/parser/statement.rs | 2 +- .../ast/tests/it/testdata/statement-error.txt | 4 +- .../tests/it/storages/fuse/operations/mod.rs | 1 - .../fuse/operations/purge_truncate.rs | 75 ------------------- .../it/storages/testdata/columns_table.txt | 2 - .../base/05_ddl/05_0000_ddl_create_tables | 2 +- .../09_fuse_engine/09_0017_transient_table | 41 ---------- website/blog/2022-10-10-time-travel.md | 6 -- 13 files changed, 10 insertions(+), 141 deletions(-) delete mode 100644 src/query/service/tests/it/storages/fuse/operations/purge_truncate.rs delete mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table diff --git a/benchmark/clickbench/hits/create.sql b/benchmark/clickbench/hits/create.sql index 836c55c728a0..b446288b409e 100644 --- a/benchmark/clickbench/hits/create.sql +++ b/benchmark/clickbench/hits/create.sql @@ -1,4 +1,4 @@ -CREATE TRANSIENT TABLE hits +CREATE TABLE hits ( WatchID BIGINT NOT NULL, JavaEnable SMALLINT NOT NULL, diff --git a/benchmark/clickbench/hits/create_local.sql b/benchmark/clickbench/hits/create_local.sql index 3c0c7d51ed66..8bf12bf2c65b 100644 --- a/benchmark/clickbench/hits/create_local.sql +++ b/benchmark/clickbench/hits/create_local.sql @@ -1,4 +1,4 @@ -CREATE TRANSIENT TABLE hits +CREATE TABLE hits ( WatchID BIGINT NOT NULL, JavaEnable SMALLINT NOT NULL, diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/40-ddl-truncate-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/40-ddl-truncate-table.md index 68592fc28c60..9f8cabac2fc2 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/40-ddl-truncate-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/40-ddl-truncate-table.md @@ -2,14 +2,14 @@ title: TRUNCATE TABLE --- -Removes all data from a table while 
preserving the table's schema. It deletes all rows in the table, making it an empty table with the same columns and constraints. Please note that, it does not release the disk space allocated to the table. To release the disk space, include the PURGE option, which is used to release the disk space allocated to the table when the truncate operation is performed. +Removes all data from a table while preserving the table's schema. It deletes all rows in the table, making it an empty table with the same columns and constraints. Please note that, it does not release the disk space allocated to the table. See also: [DROP TABLE](20-ddl-drop-table.md) ## Syntax ```sql -TRUNCATE TABLE [db.]table_name [PURGE] +TRUNCATE TABLE [db.]table_name ``` ## Examples @@ -50,10 +50,4 @@ FROM test_truncate 0 row in 0.017 sec. Processed 0 rows, 0B (0 rows/s, 0B/s) - -root@localhost> TRUNCATE TABLE test_truncate PURGE; - -TRUNCATE TABLE test_truncate PURGE - -0 row in 0.118 sec. Processed 0 rows, 0B (0 rows/s, 0B/s) ``` \ No newline at end of file diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md index be2dd8036e03..b5fade77c66f 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md @@ -10,7 +10,7 @@ The capability to flash back a table is subject to these conditions: - The command only existing tables to their prior states. To recover a dropped table, use [UNDROP TABLE](21-ddl-undrop-table.md). -- Flashback a table is part of Databend's time travel feature. Before using the command, make sure the table you want to flashback is eligible for time travel. For example, the command doesn't work for transient tables because Databend does not create or store snapshots for such tables. +- Flashback a table is part of Databend's time travel feature. 
Before using the command, make sure the table you want to flashback is eligible for time travel. - You cannot roll back after flashback a table to a prior state, but you can flash back the table again to an earlier state. diff --git a/scripts/benchmark/query/load/hits.sh b/scripts/benchmark/query/load/hits.sh index 9754e6020532..d8c9fb372a98 100755 --- a/scripts/benchmark/query/load/hits.sh +++ b/scripts/benchmark/query/load/hits.sh @@ -11,7 +11,7 @@ DROP TABLE IF EXISTS hits ALL; SQL cat < IResult { | #undrop_table : "`UNDROP TABLE [.]`" | #alter_table : "`ALTER TABLE [.]
`" | #rename_table : "`RENAME TABLE [.]
TO `" - | #truncate_table : "`TRUNCATE TABLE [.]
[PURGE]`" + | #truncate_table : "`TRUNCATE TABLE [.]
`" | #optimize_table : "`OPTIMIZE TABLE [.]
(ALL | PURGE | COMPACT [SEGMENT])`" | #vacuum_table : "`VACUUM TABLE [.]
[RETAIN number HOURS] [DRY RUN]`" | #vacuum_drop_table : "`VACUUM DROP TABLE [FROM [.]] [RETAIN number HOURS] [DRY RUN]`" diff --git a/src/query/ast/tests/it/testdata/statement-error.txt b/src/query/ast/tests/it/testdata/statement-error.txt index b76a5ad67653..d3a5bfe46ea8 100644 --- a/src/query/ast/tests/it/testdata/statement-error.txt +++ b/src/query/ast/tests/it/testdata/statement-error.txt @@ -109,7 +109,7 @@ error: --> SQL:1:21 | 1 | truncate table a.b.c.d - | ^ expected `PURGE`, `FORMAT`, or `;` + | ^ expected `FORMAT` or `;` ---------- Input ---------- @@ -121,7 +121,7 @@ error: 1 | truncate a | -------- ^ expected `TABLE` | | - | while parsing `TRUNCATE TABLE [.]
[PURGE]` + | while parsing `TRUNCATE TABLE [.]
` ---------- Input ---------- diff --git a/src/query/service/tests/it/storages/fuse/operations/mod.rs b/src/query/service/tests/it/storages/fuse/operations/mod.rs index 9d9d8e8a6175..7b22f3c11852 100644 --- a/src/query/service/tests/it/storages/fuse/operations/mod.rs +++ b/src/query/service/tests/it/storages/fuse/operations/mod.rs @@ -23,7 +23,6 @@ mod mutation; mod navigate; mod optimize; mod purge_drop; -mod purge_truncate; mod read_plan; mod replace_into; mod table_analyze; diff --git a/src/query/service/tests/it/storages/fuse/operations/purge_truncate.rs b/src/query/service/tests/it/storages/fuse/operations/purge_truncate.rs deleted file mode 100644 index 86c6baf7e692..000000000000 --- a/src/query/service/tests/it/storages/fuse/operations/purge_truncate.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2021 Datafuse Labs. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-use common_base::base::tokio; -use common_exception::Result; -use databend_query::test_kits::table_test_fixture::append_sample_data; -use databend_query::test_kits::table_test_fixture::check_data_dir; -use databend_query::test_kits::table_test_fixture::execute_command; -use databend_query::test_kits::table_test_fixture::history_should_have_item; -use databend_query::test_kits::table_test_fixture::TestFixture; - -#[tokio::test(flavor = "multi_thread")] -async fn test_fuse_truncate_purge_stmt() -> Result<()> { - let fixture = TestFixture::new().await; - let db = fixture.default_db_name(); - let tbl = fixture.default_table_name(); - let ctx = fixture.ctx(); - fixture.create_default_table().await?; - - // ingests some 2 blocks - append_sample_data(1, &fixture).await?; - append_sample_data(1, &fixture).await?; - - let expected_index_count = 2; - // there should be some data there: 2 snapshot, 2 segment, 2 block - check_data_dir( - &fixture, - "truncate_purge", - 2, - 0, - 2, - 2, - expected_index_count, - Some(()), - None, - ) - .await?; - - // let's truncate - let qry = format!("truncate table {}.{} purge", db, tbl); - execute_command(ctx.clone(), qry.as_str()).await?; - - // one history item left there - history_should_have_item( - &fixture, - "after_truncate_there_should_be_one_history_item_left", - 1, - ) - .await?; - - // there should be only a snapshot file left there, no segments or blocks - check_data_dir( - &fixture, - "truncate_after_purge_check_file_items", - 1, - 0, - 0, - 0, - 0, - Some(()), - None, - ) - .await?; - Ok(()) -} diff --git a/src/query/service/tests/it/storages/testdata/columns_table.txt b/src/query/service/tests/it/storages/testdata/columns_table.txt index 4e0d627aa1ed..7f8b98d7cde3 100644 --- a/src/query/service/tests/it/storages/testdata/columns_table.txt +++ b/src/query/service/tests/it/storages/testdata/columns_table.txt @@ -144,8 +144,6 @@ DB.Table: 'system'.'columns', Table: columns-table_id:1, ver:0, Engine: SystemCo | 
'is_insertable_into' | 'information_schema' | 'views' | 'Boolean' | 'BOOLEAN' | '' | '' | 'NO' | '' | | 'is_nullable' | 'information_schema' | 'columns' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | | 'is_nullable' | 'system' | 'columns' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | -| 'is_transient' | 'system' | 'tables' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | -| 'is_transient' | 'system' | 'tables_with_history' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | | 'is_trigger_deletable' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | | 'is_trigger_insertable_into' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | | 'is_trigger_updatable' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | diff --git a/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables b/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables index 611907fe6191..c2a7f5c96207 100644 --- a/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables +++ b/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables @@ -195,7 +195,7 @@ map MAP(INT32, STRING) NO {} (empty) variant VARIANT NO (empty) (empty) statement ok -create transient table db2.test8(tiny TINYINT not null, tiny_unsigned TINYINT UNSIGNED not null, smallint SMALLINT not null, smallint_unsigned SMALLINT UNSIGNED not null, int INT not null, int_unsigned INT UNSIGNED not null, bigint BIGINT not null, bigint_unsigned BIGINT UNSIGNED not null,float FLOAT not null, double DOUBLE not null, date DATE not null, datetime DATETIME not null, ts TIMESTAMP not null, str VARCHAR not null default '3', bool BOOLEAN not null, arr ARRAY(VARCHAR) not null, tup TUPLE(DOUBLE, INT) not null, map MAP(STRING, Date) not null, variant VARIANT not null) +create table db2.test8(tiny TINYINT not null, tiny_unsigned TINYINT UNSIGNED not null, smallint SMALLINT not null, smallint_unsigned SMALLINT UNSIGNED not null, 
int INT not null, int_unsigned INT UNSIGNED not null, bigint BIGINT not null, bigint_unsigned BIGINT UNSIGNED not null,float FLOAT not null, double DOUBLE not null, date DATE not null, datetime DATETIME not null, ts TIMESTAMP not null, str VARCHAR not null default '3', bool BOOLEAN not null, arr ARRAY(VARCHAR) not null, tup TUPLE(DOUBLE, INT) not null, map MAP(STRING, Date) not null, variant VARIANT not null) query TTTTT desc db2.test8 diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table b/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table deleted file mode 100644 index 50a3db5b52a0..000000000000 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table +++ /dev/null @@ -1,41 +0,0 @@ -statement ok -DROP DATABASE IF EXISTS db1 - -statement ok -CREATE DATABASE db1 - -statement ok -USE db1 - -statement ok -CREATE TRANSIENT TABLE IF NOT EXISTS t09_0016(a int) - -statement ok -INSERT INTO t09_0016 VALUES(1) - -statement ok -INSERT INTO t09_0016 VALUES(2) - -statement ok -INSERT INTO t09_0016 VALUES(3) - -query I -select * from t09_0016 order by a ----- -1 -2 -3 - -query B -select count(*)=1 from fuse_snapshot('db1', 't09_0016') ----- -1 - - - -statement ok -DROP TABLE t09_0016 - -statement ok -DROP DATABASE db1 - diff --git a/website/blog/2022-10-10-time-travel.md b/website/blog/2022-10-10-time-travel.md index 4316f1146d3e..34d637ff396e 100644 --- a/website/blog/2022-10-10-time-travel.md +++ b/website/blog/2022-10-10-time-travel.md @@ -40,9 +40,3 @@ The saved snapshots are the behind-the-scenes heroes that make the time travel b The Time Travel feature makes it possible to create an OLD table, which means you can create a table to hold and move on from a previous version of your data. 
The [CREATE TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table) statement can include a [SNAPSHOT_LOCATION](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table#create-table--snapshot_location) clause that allows you to specify a snapshot file that holds your old data. This command enables you to insert the data stored in the snapshot file when you create a table. Please note that the table you create must have the same column definitions as the data from the snapshot. - -## Go without Time Travel - -Tables in Databend support Time Travel out-of-the-box. However, you might not need it for some cases, for example, when you're running low of your storage space or the data is big but unimportant. Databend currently does not provide a setting to switch it off, but you can [CREATE TRANSIENT TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table#create-transient-table). - -Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Databend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the AT clause in the SELECT statement will not work for transient tables. Please note that you can still drop and undrop a transient table.
\ No newline at end of file From 1f413a45e64e2bc9914a0f0659cdb4a0ad02df41 Mon Sep 17 00:00:00 2001 From: zhyass Date: Tue, 26 Sep 2023 19:01:28 +0800 Subject: [PATCH 3/9] fix test --- .../service/tests/it/servers/http/http_query_handlers.rs | 2 +- .../tests/it/storages/fuse/operations/purge_drop.rs | 8 ++++---- .../suites/base/09_fuse_engine/09_0006_func_fuse_history | 2 +- ...unc_fuse_truncate_purge => 09_0007_func_fuse_truncate} | 4 ++-- .../base/09_fuse_engine/09_0016_remote_alter_recluster | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) rename tests/sqllogictests/suites/base/09_fuse_engine/{09_0007_func_fuse_truncate_purge => 09_0007_func_fuse_truncate} (95%) diff --git a/src/query/service/tests/it/servers/http/http_query_handlers.rs b/src/query/service/tests/it/servers/http/http_query_handlers.rs index 5ef40a3edde6..66a01b57679d 100644 --- a/src/query/service/tests/it/servers/http/http_query_handlers.rs +++ b/src/query/service/tests/it/servers/http/http_query_handlers.rs @@ -128,7 +128,7 @@ async fn test_simple_sql() -> Result<()> { assert_eq!(result.state, ExecuteStateKind::Succeeded, "{:?}", result); assert_eq!(result.next_uri, Some(final_uri.clone()), "{:?}", result); assert_eq!(result.data.len(), 10, "{:?}", result); - assert_eq!(result.schema.len(), 18, "{:?}", result); + assert_eq!(result.schema.len(), 17, "{:?}", result); // get state let uri = make_state_uri(query_id); diff --git a/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs b/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs index 4c9e7762c5ca..9cccb0d6ee38 100644 --- a/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs +++ b/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs @@ -51,12 +51,12 @@ async fn test_fuse_snapshot_truncate_in_drop_all_stmt() -> Result<()> { check_data_dir( &fixture, - "drop table: there should be 1 snapshot, 0 segment/block", + "drop table: there should be 1 snapshot, 1 segment/block", 1, // 1 
snapshot 0, // 0 snapshot statistic - 0, // 0 segments - 0, // 0 blocks - 0, // 0 index + 1, // 0 segments + 1, // 0 blocks + 1, // 0 index None, None, ) diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0006_func_fuse_history b/tests/sqllogictests/suites/base/09_fuse_engine/09_0006_func_fuse_history index 522df13c0f26..74caa07f7c9a 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0006_func_fuse_history +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0006_func_fuse_history @@ -81,7 +81,7 @@ select count() from fuse_block('db_09_0006', 't1') 5 statement ok -truncate table t1 purge +truncate table t1 query I select block_size from fuse_block('db_09_0006', 't1') diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate_purge b/tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate similarity index 95% rename from tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate_purge rename to tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate index 54d0d969018c..0af63cefa0bc 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate_purge +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0007_func_fuse_truncate @@ -25,12 +25,12 @@ select count(*) from fuse_snapshot('db_09_0007', 't') 3 statement ok -truncate table `t` purge +truncate table `t` query I select count(*) from fuse_snapshot('db_09_0007', 't') ---- -1 +4 statement ok select * from t diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0016_remote_alter_recluster b/tests/sqllogictests/suites/base/09_fuse_engine/09_0016_remote_alter_recluster index 367af1a8242c..b65f946d7aeb 100644 --- a/tests/sqllogictests/suites/base/09_fuse_engine/09_0016_remote_alter_recluster +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0016_remote_alter_recluster @@ -82,7 +82,7 @@ select average_overlaps, average_depth, block_depth_histogram from clustering_in # test 
trim string statement ok -truncate table t3 purge +truncate table t3 statement ok insert into t3 values(1,'123456780'),(2,'123456781') From 3779c59008a906c64bd889caedf222dfec842d3f Mon Sep 17 00:00:00 2001 From: zhyass Date: Tue, 26 Sep 2023 21:21:10 +0800 Subject: [PATCH 4/9] remove drop all --- benchmark/clickbench/hits/clear.sql | 3 +- benchmark/clickbench/tpch/clear.sql | 17 ++++++----- benchmark/tpcds/load_data.sh | 4 ++- .../00-ddl/20-table/20-ddl-drop-table.md | 3 -- scripts/benchmark/query/load/hits.sh | 6 +++- src/query/ast/src/ast/statements/table.rs | 4 --- src/query/ast/src/parser/statement.rs | 5 ++-- src/query/ast/tests/it/testdata/statement.txt | 4 --- .../tests/it/aggregating_index/index_scan.rs | 4 +-- .../tests/it/storages/fuse/operations/gc.rs | 6 ++-- .../it/storages/fuse/operations/purge_drop.rs | 30 ------------------- src/query/sql/src/planner/binder/ddl/table.rs | 2 -- src/query/sql/src/planner/plans/ddl/table.rs | 1 - .../block/block_reader_parquet_deserialize.rs | 2 +- src/tests/sqlsmith/src/sql_gen/ddl.rs | 1 - .../base/01_system/01_0002_system_query_log | 2 +- .../01_0007_system_clustering_history | 2 +- .../base/03_common/03_0003_select_group_by | 2 +- .../suites/base/03_common/03_0025_delete_from | 14 ++++----- .../base/03_common/03_0028_copy_into_stage | 2 +- .../03_common/03_0031_copy_into_user_stage | 2 +- .../suites/base/03_common/03_0035_update | 6 ++-- .../base/05_ddl/05_0001_ddl_drop_table_full | 4 +-- .../suites/base/05_ddl/05_0023_exists_table | 2 +- .../12_time_travel/12_0003_time_travel_undrop | 17 ----------- .../suites/base/issues/issue_10103.test | 4 +-- .../group/group_by_grouping_sets.test | 4 +-- .../mode/cluster/04_0002_explain_v2.test | 4 +-- .../mode/standalone/explain/explain.test | 4 +-- .../standalone/explain_native/explain.test | 4 +-- .../02_function/02_0012_function_datetimes | 2 +- .../02_function/02_0012_function_datetimes_tz | 2 +- .../query/02_function/02_0014_function_maths | 2 +- 
.../02_0018_function_strings_repeat | 8 ++--- ...0048_function_semi_structureds_object_keys | 2 +- tests/sqllogictests/suites/query/cte.test | 2 +- .../suites/query/render_result.test | 4 +-- tests/sqllogictests/suites/ydb/select1-1.test | 2 +- tests/sqllogictests/suites/ydb/select1-2.test | 2 +- tests/sqllogictests/suites/ydb/select1-3.test | 2 +- tests/sqllogictests/suites/ydb/select1-4.test | 2 +- tests/sqllogictests/suites/ydb/select1-5.test | 2 +- tests/sqllogictests/suites/ydb/select2-1.test | 2 +- tests/sqllogictests/suites/ydb/select2-2.test | 2 +- tests/sqllogictests/suites/ydb/select2-3.test | 2 +- tests/sqllogictests/suites/ydb/select2-4.test | 2 +- tests/sqllogictests/suites/ydb/select2-5.test | 2 +- tests/sqllogictests/suites/ydb/select3-1.test | 2 +- .../sqllogictests/suites/ydb/select3-10.test | 2 +- .../sqllogictests/suites/ydb/select3-11.test | 2 +- .../sqllogictests/suites/ydb/select3-12.test | 2 +- .../sqllogictests/suites/ydb/select3-13.test | 2 +- .../sqllogictests/suites/ydb/select3-14.test | 2 +- .../sqllogictests/suites/ydb/select3-15.test | 2 +- tests/sqllogictests/suites/ydb/select3-2.test | 2 +- tests/sqllogictests/suites/ydb/select3-3.test | 2 +- tests/sqllogictests/suites/ydb/select3-4.test | 2 +- tests/sqllogictests/suites/ydb/select3-5.test | 2 +- tests/sqllogictests/suites/ydb/select3-6.test | 2 +- tests/sqllogictests/suites/ydb/select3-7.test | 2 +- tests/sqllogictests/suites/ydb/select3-8.test | 2 +- tests/sqllogictests/suites/ydb/select3-9.test | 2 +- .../17_0002_alter_table_purge_before.sh | 4 +-- .../17_0003_alter_table_update.sh | 4 +-- .../20+_others/20_0011_purge_before.sh | 4 +-- .../20+_others/20_0012_privilege_access.sh | 6 ++-- .../04_mini_dataset/04_0001_mini_hits.sh | 2 +- .../05_01_00_load_compact_copy.sh | 2 +- .../05_01_01_load_compact_streaming_load.sh | 2 +- .../05_01_02_load_compact_copy_max_size.sh | 2 +- ...5_01_02_load_compact_copy_row_per_block.sh | 2 +- .../01_vacuum/01_0002_ee_vacuum_drop_table.sh | 1 + 72 
files changed, 106 insertions(+), 160 deletions(-) diff --git a/benchmark/clickbench/hits/clear.sql b/benchmark/clickbench/hits/clear.sql index e70c0347a8da..af216bba119f 100644 --- a/benchmark/clickbench/hits/clear.sql +++ b/benchmark/clickbench/hits/clear.sql @@ -1 +1,2 @@ -drop table hits all; +drop table hits; +VACUUM DROP TABLE; diff --git a/benchmark/clickbench/tpch/clear.sql b/benchmark/clickbench/tpch/clear.sql index ded376e4a710..54c2f37aab34 100644 --- a/benchmark/clickbench/tpch/clear.sql +++ b/benchmark/clickbench/tpch/clear.sql @@ -1,8 +1,9 @@ -drop table customer all; -drop table lineitem all; -drop table nation all; -drop table orders all; -drop table partsupp all; -drop table part all; -drop table region all; -drop table supplier all; +drop table customer; +drop table lineitem; +drop table nation; +drop table orders; +drop table partsupp; +drop table part; +drop table region; +drop table supplier; +VACUUM DROP TABLE; diff --git a/benchmark/tpcds/load_data.sh b/benchmark/tpcds/load_data.sh index b2f71f834d31..521c63ea639a 100755 --- a/benchmark/tpcds/load_data.sh +++ b/benchmark/tpcds/load_data.sh @@ -36,9 +36,11 @@ tables=( # Clear Data for t in ${tables[@]} do - echo "DROP TABLE IF EXISTS $t ALL" | $MYSQL_CLIENT_CONNECT + echo "DROP TABLE IF EXISTS $t" | $MYSQL_CLIENT_CONNECT done +echo "VACUUM DROP TABLE" | $MYSQL_CLIENT_CONNECT + # Create Tables; cat "$CURDIR"/tpcds.sql | $MYSQL_CLIENT_CONNECT diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/20-ddl-drop-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/20-ddl-drop-table.md index 704f7253825b..0ca931eae633 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/20-ddl-drop-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/20-ddl-drop-table.md @@ -18,9 +18,6 @@ DROP TABLE [IF EXISTS] [db.]name :::caution `DROP TABLE` only remove the table schema from meta service, we do not remove the underlying data from the storage. 
-If you want to delete the data and table all, please use: - -`DROP TABLE ALL;` ::: diff --git a/scripts/benchmark/query/load/hits.sh b/scripts/benchmark/query/load/hits.sh index d8c9fb372a98..60e69bb9e5f3 100755 --- a/scripts/benchmark/query/load/hits.sh +++ b/scripts/benchmark/query/load/hits.sh @@ -7,7 +7,11 @@ select version(); SQL cat <, pub database: Option, pub table: Identifier, - pub all: bool, } impl Display for DropTableStmt { @@ -267,9 +266,6 @@ impl Display for DropTableStmt { .chain(&self.database) .chain(Some(&self.table)), )?; - if self.all { - write!(f, " ALL")?; - } Ok(()) } diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index f9701be5bdf4..8b245c2fbc62 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -575,15 +575,14 @@ pub fn statement(i: Input) -> IResult { ); let drop_table = map( rule! { - DROP ~ TABLE ~ ( IF ~ ^EXISTS )? ~ #dot_separated_idents_1_to_3 ~ ALL? + DROP ~ TABLE ~ ( IF ~ ^EXISTS )? 
~ #dot_separated_idents_1_to_3 }, - |(_, _, opt_if_exists, (catalog, database, table), opt_all)| { + |(_, _, opt_if_exists, (catalog, database, table))| { Statement::DropTable(DropTableStmt { if_exists: opt_if_exists.is_some(), catalog, database, table, - all: opt_all.is_some(), }) }, ); diff --git a/src/query/ast/tests/it/testdata/statement.txt b/src/query/ast/tests/it/testdata/statement.txt index 5f8245313b34..3451a0037665 100644 --- a/src/query/ast/tests/it/testdata/statement.txt +++ b/src/query/ast/tests/it/testdata/statement.txt @@ -1502,7 +1502,6 @@ DropTable( 11..12, ), }, - all: false, }, ) @@ -1534,7 +1533,6 @@ DropTable( 23..26, ), }, - all: false, }, ) @@ -2394,7 +2392,6 @@ DropTable( 11..17, ), }, - all: false, }, ) @@ -2416,7 +2413,6 @@ DropTable( 21..27, ), }, - all: false, }, ) diff --git a/src/query/ee/tests/it/aggregating_index/index_scan.rs b/src/query/ee/tests/it/aggregating_index/index_scan.rs index b5c955f142c4..9316863ac46e 100644 --- a/src/query/ee/tests/it/aggregating_index/index_scan.rs +++ b/src/query/ee/tests/it/aggregating_index/index_scan.rs @@ -1085,8 +1085,8 @@ async fn test_fuzz_impl(format: &str) -> Result<()> { } // Clear data - execute_sql(fixture.ctx(), "DROP TABLE rt ALL").await?; - execute_sql(fixture.ctx(), "DROP TABLE t ALL").await?; + execute_sql(fixture.ctx(), "DROP TABLE rt").await?; + execute_sql(fixture.ctx(), "DROP TABLE t").await?; } } Ok(()) diff --git a/src/query/service/tests/it/storages/fuse/operations/gc.rs b/src/query/service/tests/it/storages/fuse/operations/gc.rs index 512436f2d875..447ddcc394c0 100644 --- a/src/query/service/tests/it/storages/fuse/operations/gc.rs +++ b/src/query/service/tests/it/storages/fuse/operations/gc.rs @@ -114,9 +114,9 @@ async fn test_fuse_purge_normal_orphan_snapshot() -> Result<()> { "do_gc: there should be 1 snapshot, 0 segment/block", expected_num_of_snapshot, 0, // 0 snapshot statistic - 1, // 0 segments - 1, // 0 blocks - 1, // 0 index + 1, // 1 segments + 1, // 1 blocks + 1, // 
1 index Some(()), None, ) diff --git a/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs b/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs index 9cccb0d6ee38..b78191acce2f 100644 --- a/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs +++ b/src/query/service/tests/it/storages/fuse/operations/purge_drop.rs @@ -15,7 +15,6 @@ use common_base::base::tokio; use common_exception::Result; use databend_query::test_kits::table_test_fixture::append_sample_data; -use databend_query::test_kits::table_test_fixture::check_data_dir; use databend_query::test_kits::table_test_fixture::execute_command; use databend_query::test_kits::table_test_fixture::TestFixture; @@ -34,32 +33,3 @@ async fn test_fuse_snapshot_truncate_in_drop_stmt() -> Result<()> { execute_command(ctx.clone(), qry.as_str()).await?; Ok(()) } - -#[tokio::test(flavor = "multi_thread")] -async fn test_fuse_snapshot_truncate_in_drop_all_stmt() -> Result<()> { - let fixture = TestFixture::new().await; - let db = fixture.default_db_name(); - let tbl = fixture.default_table_name(); - let ctx = fixture.ctx(); - fixture.create_default_table().await?; - - // ingests some test data - append_sample_data(1, &fixture).await?; - // let's Drop - let qry = format!("drop table {}.{} all", db, tbl); - execute_command(ctx.clone(), qry.as_str()).await?; - - check_data_dir( - &fixture, - "drop table: there should be 1 snapshot, 1 segment/block", - 1, // 1 snapshot - 0, // 0 snapshot statistic - 1, // 0 segments - 1, // 0 blocks - 1, // 0 index - None, - None, - ) - .await?; - Ok(()) -} diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index 89525aa02b4b..043546fe05a5 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -660,7 +660,6 @@ impl Binder { catalog, database, table, - all, } = stmt; let tenant = self.ctx.get_tenant(); @@ -673,7 +672,6 @@ impl Binder { catalog, 
database, table, - all: *all, }))) } diff --git a/src/query/sql/src/planner/plans/ddl/table.rs b/src/query/sql/src/planner/plans/ddl/table.rs index ea86d7575ac5..899eae287f8a 100644 --- a/src/query/sql/src/planner/plans/ddl/table.rs +++ b/src/query/sql/src/planner/plans/ddl/table.rs @@ -84,7 +84,6 @@ pub struct DropTablePlan { pub database: String, /// The table name pub table: String, - pub all: bool, } impl DropTablePlan { diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_parquet_deserialize.rs b/src/query/storages/fuse/src/io/read/block/block_reader_parquet_deserialize.rs index 1c7a8528965b..e83767533c59 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_parquet_deserialize.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_parquet_deserialize.rs @@ -336,7 +336,7 @@ impl BlockReader { Suppose the name of table is T ~~~ create table tmp_t as select * from T; - drop table T all; + drop table T; alter table tmp_t rename to T; ~~~ Please note that the history of table T WILL BE LOST. 
diff --git a/src/tests/sqlsmith/src/sql_gen/ddl.rs b/src/tests/sqlsmith/src/sql_gen/ddl.rs index 2e45b07787c2..de7732ab5198 100644 --- a/src/tests/sqlsmith/src/sql_gen/ddl.rs +++ b/src/tests/sqlsmith/src/sql_gen/ddl.rs @@ -66,7 +66,6 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { catalog: None, database: None, table: Identifier::from_name(table_name.clone()), - all: false, }; let create_table = CreateTableStmt { if_not_exists: true, diff --git a/tests/sqllogictests/suites/base/01_system/01_0002_system_query_log b/tests/sqllogictests/suites/base/01_system/01_0002_system_query_log index 9ec8305f11df..82ce2774d1d8 100644 --- a/tests/sqllogictests/suites/base/01_system/01_0002_system_query_log +++ b/tests/sqllogictests/suites/base/01_system/01_0002_system_query_log @@ -12,7 +12,7 @@ select count(*) > 0 from system.query_log 1 statement ok -drop table if exists tbl_01_0002 all +drop table if exists tbl_01_0002 statement ok create table tbl_01_0002(a int) diff --git a/tests/sqllogictests/suites/base/01_system/01_0007_system_clustering_history b/tests/sqllogictests/suites/base/01_system/01_0007_system_clustering_history index 0294adf01e81..1072cd21c742 100644 --- a/tests/sqllogictests/suites/base/01_system/01_0007_system_clustering_history +++ b/tests/sqllogictests/suites/base/01_system/01_0007_system_clustering_history @@ -1,5 +1,5 @@ statement ok -drop table if exists tbl_01_0007 all +drop table if exists tbl_01_0007 statement ok create table tbl_01_0007(a int not null) cluster by(a) diff --git a/tests/sqllogictests/suites/base/03_common/03_0003_select_group_by b/tests/sqllogictests/suites/base/03_common/03_0003_select_group_by index 5a57592d690f..602cf6039f5d 100644 --- a/tests/sqllogictests/suites/base/03_common/03_0003_select_group_by +++ b/tests/sqllogictests/suites/base/03_common/03_0003_select_group_by @@ -75,7 +75,7 @@ statement ok DROP table t statement ok -drop table if exists t_datetime all +drop table if exists t_datetime statement ok CREATE TABLE 
t_datetime(created_at Date, created_time DateTime, count Int32) diff --git a/tests/sqllogictests/suites/base/03_common/03_0025_delete_from b/tests/sqllogictests/suites/base/03_common/03_0025_delete_from index 692e4f549964..ec371b908bba 100644 --- a/tests/sqllogictests/suites/base/03_common/03_0025_delete_from +++ b/tests/sqllogictests/suites/base/03_common/03_0025_delete_from @@ -54,7 +54,7 @@ select count(*) = 0 from t statement ok -drop table t all +drop table t statement ok create table t (c Int null) @@ -141,7 +141,7 @@ select count(*) = 0 from t statement ok -drop table t all +drop table t statement ok create table t(c Int) CLUSTER BY(c+1) @@ -161,7 +161,7 @@ select count(*) = 2 from t 1 statement ok -drop table t all +drop table t statement ok create table t(a Int, b Int) @@ -186,7 +186,7 @@ statement ok delete from t where t.a in (select * from numbers(10)) statement ok -drop table t all +drop table t #################################### @@ -245,7 +245,7 @@ select * from t order by c; statement ok -drop table t all +drop table t #################################### # delete pruning, whole segments # @@ -279,7 +279,7 @@ select * from t order by c; 9 statement ok -drop table t all +drop table t # test large data statement ok @@ -319,7 +319,7 @@ select count(*) from t where c >= 0 and c < 1500000; 0 statement ok -drop table t all +drop table t statement ok DROP DATABASE db1 diff --git a/tests/sqllogictests/suites/base/03_common/03_0028_copy_into_stage b/tests/sqllogictests/suites/base/03_common/03_0028_copy_into_stage index ec78cb18074a..e607cd744ed2 100644 --- a/tests/sqllogictests/suites/base/03_common/03_0028_copy_into_stage +++ b/tests/sqllogictests/suites/base/03_common/03_0028_copy_into_stage @@ -31,7 +31,7 @@ SELECT COUNT() FROM test_table 4 statement ok -drop table test_table all +drop table test_table statement ok drop stage test diff --git a/tests/sqllogictests/suites/base/03_common/03_0031_copy_into_user_stage 
b/tests/sqllogictests/suites/base/03_common/03_0031_copy_into_user_stage index 6dee4c9f4fdf..462da395ef8e 100644 --- a/tests/sqllogictests/suites/base/03_common/03_0031_copy_into_user_stage +++ b/tests/sqllogictests/suites/base/03_common/03_0031_copy_into_user_stage @@ -28,7 +28,7 @@ SELECT COUNT() FROM test_table 4 statement ok -drop table test_table all +drop table test_table statement ok DROP DATABASE db1 diff --git a/tests/sqllogictests/suites/base/03_common/03_0035_update b/tests/sqllogictests/suites/base/03_common/03_0035_update index 25d88b78d7d6..f347f054432a 100644 --- a/tests/sqllogictests/suites/base/03_common/03_0035_update +++ b/tests/sqllogictests/suites/base/03_common/03_0035_update @@ -89,13 +89,13 @@ select a from t3 6 statement ok -drop table t1 all +drop table t1 statement ok -drop table t2 all +drop table t2 statement ok -drop table t3 all +drop table t3 statement ok create table t1(id1 int, val1 varchar(255)); diff --git a/tests/sqllogictests/suites/base/05_ddl/05_0001_ddl_drop_table_full b/tests/sqllogictests/suites/base/05_ddl/05_0001_ddl_drop_table_full index 5be01f171242..35cf934d75ed 100644 --- a/tests/sqllogictests/suites/base/05_ddl/05_0001_ddl_drop_table_full +++ b/tests/sqllogictests/suites/base/05_ddl/05_0001_ddl_drop_table_full @@ -11,13 +11,13 @@ statement ok CREATE TABLE t(c1 int) ENGINE = Null statement ok -DROP TABLE t ALL +DROP TABLE t statement ok CREATE TABLE t(c1 int) ENGINE = Fuse statement ok -DROP TABLE t ALL +DROP TABLE t statement ok DROP database db_13_0001 diff --git a/tests/sqllogictests/suites/base/05_ddl/05_0023_exists_table b/tests/sqllogictests/suites/base/05_ddl/05_0023_exists_table index 971e21f68fbc..80bcb8cbfe49 100644 --- a/tests/sqllogictests/suites/base/05_ddl/05_0023_exists_table +++ b/tests/sqllogictests/suites/base/05_ddl/05_0023_exists_table @@ -23,7 +23,7 @@ statement ok EXISTS TABLE db_05_0023_v2.t statement ok -DROP TABLE t ALL +DROP TABLE t statement ok EXISTS TABLE db_05_0023_v2.t diff --git 
a/tests/sqllogictests/suites/base/12_time_travel/12_0003_time_travel_undrop b/tests/sqllogictests/suites/base/12_time_travel/12_0003_time_travel_undrop index 0f0609ace3e8..50acc17bf61c 100644 --- a/tests/sqllogictests/suites/base/12_time_travel/12_0003_time_travel_undrop +++ b/tests/sqllogictests/suites/base/12_time_travel/12_0003_time_travel_undrop @@ -103,22 +103,5 @@ SELECT count(1) FROM t statement ok DROP TABLE t -statement ok -CREATE TABLE t(c int) - -statement ok -INSERT INTO t values(1) - -statement ok -DROP TABLE t ALL - -statement ok -UNDROP TABLE t - -query I -SELECT count(*) FROM t ----- -0 - statement ok DROP database db_12_0003 diff --git a/tests/sqllogictests/suites/base/issues/issue_10103.test b/tests/sqllogictests/suites/base/issues/issue_10103.test index f617f68c50e8..3be0dad32034 100644 --- a/tests/sqllogictests/suites/base/issues/issue_10103.test +++ b/tests/sqllogictests/suites/base/issues/issue_10103.test @@ -34,10 +34,10 @@ SELECT ts FROM test_ts_table LIMIT 1 2023-02-19 11:18:01.000000 statement ok -drop table test_table all +drop table test_table statement ok -drop table test_ts_table all +drop table test_ts_table statement ok drop stage test_10103 diff --git a/tests/sqllogictests/suites/duckdb/sql/aggregate/group/group_by_grouping_sets.test b/tests/sqllogictests/suites/duckdb/sql/aggregate/group/group_by_grouping_sets.test index e19ae1bf808e..4bfb3f29314e 100644 --- a/tests/sqllogictests/suites/duckdb/sql/aggregate/group/group_by_grouping_sets.test +++ b/tests/sqllogictests/suites/duckdb/sql/aggregate/group/group_by_grouping_sets.test @@ -245,10 +245,10 @@ a B 1 5 NULL B a A 1 5 NULL NULL statement ok -drop table t all; +drop table t; statement ok -drop table tt all; +drop table tt; statement ok drop database grouping_sets; diff --git a/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test b/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test index 3fa4bfbf8c5e..a96df63e25e0 100644 --- 
a/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test +++ b/tests/sqllogictests/suites/mode/cluster/04_0002_explain_v2.test @@ -2,10 +2,10 @@ statement ok set prefer_broadcast_join = 0 statement ok -drop table if exists t1 all; +drop table if exists t1; statement ok -drop table if exists t2 all; +drop table if exists t2; statement ok create table t1(a int not null, b int not null); diff --git a/tests/sqllogictests/suites/mode/standalone/explain/explain.test b/tests/sqllogictests/suites/mode/standalone/explain/explain.test index 40519c29096a..132b69430e93 100644 --- a/tests/sqllogictests/suites/mode/standalone/explain/explain.test +++ b/tests/sqllogictests/suites/mode/standalone/explain/explain.test @@ -1,8 +1,8 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok -drop table if exists t2 all +drop table if exists t2 statement ok create table t1 as select number as a, number as b from numbers(1) diff --git a/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test b/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test index 267c0cc4489c..b9f77ca809f8 100644 --- a/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test +++ b/tests/sqllogictests/suites/mode/standalone/explain_native/explain.test @@ -1,8 +1,8 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok -drop table if exists t2 all +drop table if exists t2 statement ok create table t1 as select number as a, number as b from numbers(1) diff --git a/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes b/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes index b795556fdfe0..45ea602d715e 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes +++ b/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes @@ -1,5 +1,5 @@ statement ok -drop table if exists t all +drop table if exists t statement ok set timezone = 
'UTC' diff --git a/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes_tz b/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes_tz index 765c124cbf74..cd751f969ee3 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes_tz +++ b/tests/sqllogictests/suites/query/02_function/02_0012_function_datetimes_tz @@ -1,5 +1,5 @@ statement ok -drop table if exists tt all +drop table if exists tt statement ok set timezone='UTC' diff --git a/tests/sqllogictests/suites/query/02_function/02_0014_function_maths b/tests/sqllogictests/suites/query/02_function/02_0014_function_maths index e88a7c4cb3f7..6325dc32dbbc 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0014_function_maths +++ b/tests/sqllogictests/suites/query/02_function/02_0014_function_maths @@ -1,5 +1,5 @@ statement ok -drop table if exists math_sample_numbers all +drop table if exists math_sample_numbers statement ok CREATE TABLE math_sample_numbers (timestamp UInt32, value Int32) Engine = Fuse diff --git a/tests/sqllogictests/suites/query/02_function/02_0018_function_strings_repeat b/tests/sqllogictests/suites/query/02_function/02_0018_function_strings_repeat index a2c3ba90ad7e..7e6bcdbafe20 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0018_function_strings_repeat +++ b/tests/sqllogictests/suites/query/02_function/02_0018_function_strings_repeat @@ -1,14 +1,14 @@ statement ok -drop table if exists strings_repeat_sample_u8 all +drop table if exists strings_repeat_sample_u8 statement ok -drop table if exists strings_repeat_sample_u16 all +drop table if exists strings_repeat_sample_u16 statement ok -drop table if exists strings_repeat_sample_u32 all +drop table if exists strings_repeat_sample_u32 statement ok -drop table if exists strings_repeat_sample_u64 all +drop table if exists strings_repeat_sample_u64 statement ok CREATE TABLE strings_repeat_sample_u8(s String, n Uint8) Engine = Fuse diff --git 
a/tests/sqllogictests/suites/query/02_function/02_0048_function_semi_structureds_object_keys b/tests/sqllogictests/suites/query/02_function/02_0048_function_semi_structureds_object_keys index d475a1a6839c..af71632ed5ce 100644 --- a/tests/sqllogictests/suites/query/02_function/02_0048_function_semi_structureds_object_keys +++ b/tests/sqllogictests/suites/query/02_function/02_0048_function_semi_structureds_object_keys @@ -1,5 +1,5 @@ statement ok -drop table if exists objects_test1 all +drop table if exists objects_test1 statement ok CREATE TABLE IF NOT EXISTS objects_test1(id TINYINT, obj VARIANT, var VARIANT) Engine = Fuse diff --git a/tests/sqllogictests/suites/query/cte.test b/tests/sqllogictests/suites/query/cte.test index 05f2173d097c..cae63902965c 100644 --- a/tests/sqllogictests/suites/query/cte.test +++ b/tests/sqllogictests/suites/query/cte.test @@ -2,7 +2,7 @@ statement ok use default statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/query/render_result.test b/tests/sqllogictests/suites/query/render_result.test index aef7151accd1..bfb960bf447c 100644 --- a/tests/sqllogictests/suites/query/render_result.test +++ b/tests/sqllogictests/suites/query/render_result.test @@ -2,7 +2,7 @@ statement ok use default statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) @@ -30,4 +30,4 @@ order by col1,col5,col3,col2,col4 106 1 333 1067 109 statement ok -drop table if exists t1 all +drop table if exists t1 diff --git a/tests/sqllogictests/suites/ydb/select1-1.test b/tests/sqllogictests/suites/ydb/select1-1.test index 766b45362807..0d0390458591 100644 --- a/tests/sqllogictests/suites/ydb/select1-1.test +++ b/tests/sqllogictests/suites/ydb/select1-1.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 
statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/ydb/select1-2.test b/tests/sqllogictests/suites/ydb/select1-2.test index b8eb36808143..6093d3b5d977 100644 --- a/tests/sqllogictests/suites/ydb/select1-2.test +++ b/tests/sqllogictests/suites/ydb/select1-2.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/ydb/select1-3.test b/tests/sqllogictests/suites/ydb/select1-3.test index 4d3b407c6f04..7ebd1a7dc9fa 100644 --- a/tests/sqllogictests/suites/ydb/select1-3.test +++ b/tests/sqllogictests/suites/ydb/select1-3.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/ydb/select1-4.test b/tests/sqllogictests/suites/ydb/select1-4.test index 49a214f8502e..b6906faa30d2 100644 --- a/tests/sqllogictests/suites/ydb/select1-4.test +++ b/tests/sqllogictests/suites/ydb/select1-4.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/ydb/select1-5.test b/tests/sqllogictests/suites/ydb/select1-5.test index 4be2a249bfe1..dcf4708612d2 100644 --- a/tests/sqllogictests/suites/ydb/select1-5.test +++ b/tests/sqllogictests/suites/ydb/select1-5.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer, b integer, c integer, d integer, e integer) diff --git a/tests/sqllogictests/suites/ydb/select2-1.test b/tests/sqllogictests/suites/ydb/select2-1.test index 36cf9b00c8e9..9b45f459106e 100644 --- a/tests/sqllogictests/suites/ydb/select2-1.test +++ 
b/tests/sqllogictests/suites/ydb/select2-1.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select2-2.test b/tests/sqllogictests/suites/ydb/select2-2.test index 9d5471946805..3ff660a15473 100644 --- a/tests/sqllogictests/suites/ydb/select2-2.test +++ b/tests/sqllogictests/suites/ydb/select2-2.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select2-3.test b/tests/sqllogictests/suites/ydb/select2-3.test index 8261386c5567..3309b5d1b6b8 100644 --- a/tests/sqllogictests/suites/ydb/select2-3.test +++ b/tests/sqllogictests/suites/ydb/select2-3.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select2-4.test b/tests/sqllogictests/suites/ydb/select2-4.test index 0cf264950b1f..2161ba020978 100644 --- a/tests/sqllogictests/suites/ydb/select2-4.test +++ b/tests/sqllogictests/suites/ydb/select2-4.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select2-5.test b/tests/sqllogictests/suites/ydb/select2-5.test index c992d31c3c2d..10b98ba17e16 100644 --- a/tests/sqllogictests/suites/ydb/select2-5.test +++ b/tests/sqllogictests/suites/ydb/select2-5.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e 
integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-1.test b/tests/sqllogictests/suites/ydb/select3-1.test index 22456c0cd6c9..073074374831 100644 --- a/tests/sqllogictests/suites/ydb/select3-1.test +++ b/tests/sqllogictests/suites/ydb/select3-1.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-10.test b/tests/sqllogictests/suites/ydb/select3-10.test index 9866756f8909..c1f1216ab432 100644 --- a/tests/sqllogictests/suites/ydb/select3-10.test +++ b/tests/sqllogictests/suites/ydb/select3-10.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-11.test b/tests/sqllogictests/suites/ydb/select3-11.test index 1a446a863362..e3e6d6b63fd7 100644 --- a/tests/sqllogictests/suites/ydb/select3-11.test +++ b/tests/sqllogictests/suites/ydb/select3-11.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-12.test b/tests/sqllogictests/suites/ydb/select3-12.test index 041169ab4421..708709884737 100644 --- a/tests/sqllogictests/suites/ydb/select3-12.test +++ b/tests/sqllogictests/suites/ydb/select3-12.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-13.test b/tests/sqllogictests/suites/ydb/select3-13.test index 4f0d2d708804..ae77d1da8322 100644 --- a/tests/sqllogictests/suites/ydb/select3-13.test 
+++ b/tests/sqllogictests/suites/ydb/select3-13.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-14.test b/tests/sqllogictests/suites/ydb/select3-14.test index 223a247e9df1..1b62d6d12f15 100644 --- a/tests/sqllogictests/suites/ydb/select3-14.test +++ b/tests/sqllogictests/suites/ydb/select3-14.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-15.test b/tests/sqllogictests/suites/ydb/select3-15.test index 4c814cf69207..cf6c1fab47d2 100644 --- a/tests/sqllogictests/suites/ydb/select3-15.test +++ b/tests/sqllogictests/suites/ydb/select3-15.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-2.test b/tests/sqllogictests/suites/ydb/select3-2.test index e6b89c287891..f5e46aca3f4a 100644 --- a/tests/sqllogictests/suites/ydb/select3-2.test +++ b/tests/sqllogictests/suites/ydb/select3-2.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-3.test b/tests/sqllogictests/suites/ydb/select3-3.test index bd6c1fcaa9aa..feb46d3a94db 100644 --- a/tests/sqllogictests/suites/ydb/select3-3.test +++ b/tests/sqllogictests/suites/ydb/select3-3.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d 
integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-4.test b/tests/sqllogictests/suites/ydb/select3-4.test index 0ff745810507..e3dd6076aa0a 100644 --- a/tests/sqllogictests/suites/ydb/select3-4.test +++ b/tests/sqllogictests/suites/ydb/select3-4.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-5.test b/tests/sqllogictests/suites/ydb/select3-5.test index 259d23e75d5b..a9913c947404 100644 --- a/tests/sqllogictests/suites/ydb/select3-5.test +++ b/tests/sqllogictests/suites/ydb/select3-5.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-6.test b/tests/sqllogictests/suites/ydb/select3-6.test index 4877d8876609..312d01a101a1 100644 --- a/tests/sqllogictests/suites/ydb/select3-6.test +++ b/tests/sqllogictests/suites/ydb/select3-6.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-7.test b/tests/sqllogictests/suites/ydb/select3-7.test index a2e5a8d5ddaa..477e9ea78503 100644 --- a/tests/sqllogictests/suites/ydb/select3-7.test +++ b/tests/sqllogictests/suites/ydb/select3-7.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-8.test b/tests/sqllogictests/suites/ydb/select3-8.test index 37e3828f498c..855703744f49 100644 --- 
a/tests/sqllogictests/suites/ydb/select3-8.test +++ b/tests/sqllogictests/suites/ydb/select3-8.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/sqllogictests/suites/ydb/select3-9.test b/tests/sqllogictests/suites/ydb/select3-9.test index f42942c264d9..01b582ab3fa2 100644 --- a/tests/sqllogictests/suites/ydb/select3-9.test +++ b/tests/sqllogictests/suites/ydb/select3-9.test @@ -1,5 +1,5 @@ statement ok -drop table if exists t1 all +drop table if exists t1 statement ok create table t1(a integer null, b integer null, c integer null, d integer null, e integer null) diff --git a/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.sh b/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.sh index 85f385032306..93644ecfd07d 100755 --- a/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.sh +++ b/tests/suites/0_stateless/17_altertable/17_0002_alter_table_purge_before.sh @@ -46,7 +46,7 @@ echo "checking that after purge (by snapshot id) there should be 4 rows left" echo "select count(*)=4 from t17_0002" | $MYSQL_CLIENT_CONNECT ## Drop table. -echo "drop table t17_0002 all" | $MYSQL_CLIENT_CONNECT +echo "drop table t17_0002" | $MYSQL_CLIENT_CONNECT # PURGE BEFORE TIMESTAMP @@ -90,4 +90,4 @@ echo "checking that after purge (by timestamp) there should be 5 rows left" echo "select count(*)=5 from t17_0002" | $MYSQL_CLIENT_CONNECT ## Drop table. 
-echo "drop table t17_0002 all" | $MYSQL_CLIENT_CONNECT +echo "drop table t17_0002" | $MYSQL_CLIENT_CONNECT diff --git a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh index fa884b01df13..e862a3bc85f8 100755 --- a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh +++ b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh @@ -25,7 +25,7 @@ echo "update table column" echo "update t17_0003 set c=2 where c=1" | $MYSQL_CLIENT_CONNECT ## Drop table. -echo "drop table t17_0003 all" | $MYSQL_CLIENT_CONNECT +echo "drop table t17_0003" | $MYSQL_CLIENT_CONNECT ## create two column table echo "create table t17_0003(a int not null, b int not null)" | $MYSQL_CLIENT_CONNECT @@ -50,4 +50,4 @@ echo "update table column" echo "update t17_0003 set a=3 where a=1" | $MYSQL_CLIENT_CONNECT ## Drop table. -echo "drop table t17_0003 all" | $MYSQL_CLIENT_CONNECT \ No newline at end of file +echo "drop table t17_0003" | $MYSQL_CLIENT_CONNECT \ No newline at end of file diff --git a/tests/suites/0_stateless/20+_others/20_0011_purge_before.sh b/tests/suites/0_stateless/20+_others/20_0011_purge_before.sh index 4980aa0d9885..49d136e729c4 100755 --- a/tests/suites/0_stateless/20+_others/20_0011_purge_before.sh +++ b/tests/suites/0_stateless/20+_others/20_0011_purge_before.sh @@ -31,7 +31,7 @@ echo "checking that after purge (by snapshot id) there should be 4 rows left" echo "select count(*)=4 from t20_0011" | $MYSQL_CLIENT_CONNECT ## Drop table. -echo "drop table t20_0011 all" | $MYSQL_CLIENT_CONNECT +echo "drop table t20_0011" | $MYSQL_CLIENT_CONNECT # PURGE BEFORE TIMESTAMP @@ -58,4 +58,4 @@ echo "checking that after purge (by timestamp) there should be 4 rows left" echo "select count(*)=4 from t20_0011" | $MYSQL_CLIENT_CONNECT ## Drop table. 
-echo "drop table t20_0011 all" | $MYSQL_CLIENT_CONNECT +echo "drop table t20_0011" | $MYSQL_CLIENT_CONNECT diff --git a/tests/suites/0_stateless/20+_others/20_0012_privilege_access.sh b/tests/suites/0_stateless/20+_others/20_0012_privilege_access.sh index 134e1f1c5efd..c766b6005b5b 100755 --- a/tests/suites/0_stateless/20+_others/20_0012_privilege_access.sh +++ b/tests/suites/0_stateless/20+_others/20_0012_privilege_access.sh @@ -113,9 +113,9 @@ echo "GRANT SELECT ON system.fuse_block TO 'test-user'" | $MYSQL_CLIENT_CONNECT echo "select count(*)>=1 from fuse_block('default', 't20_0012_a')" | $TEST_USER_CONNECT ## Drop table. -echo "drop table default.t20_0012 all" | $MYSQL_CLIENT_CONNECT -echo "drop table default.t20_0012_a all" | $MYSQL_CLIENT_CONNECT -echo "drop table default.t20_0012_b all" | $MYSQL_CLIENT_CONNECT +echo "drop table default.t20_0012" | $MYSQL_CLIENT_CONNECT +echo "drop table default.t20_0012_a" | $MYSQL_CLIENT_CONNECT +echo "drop table default.t20_0012_b" | $MYSQL_CLIENT_CONNECT echo "drop view default2.v_t20_0012" | $MYSQL_CLIENT_CONNECT ## Drop database. 
diff --git a/tests/suites/1_stateful/04_mini_dataset/04_0001_mini_hits.sh b/tests/suites/1_stateful/04_mini_dataset/04_0001_mini_hits.sh index 291ad00c80f2..e5a7e3ffa244 100755 --- a/tests/suites/1_stateful/04_mini_dataset/04_0001_mini_hits.sh +++ b/tests/suites/1_stateful/04_mini_dataset/04_0001_mini_hits.sh @@ -105,4 +105,4 @@ for i in "${hits_statements[@]}"; do done ## Clean up -echo "drop table if exists hits all;" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists hits;" | $MYSQL_CLIENT_CONNECT diff --git a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_00_load_compact_copy.sh b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_00_load_compact_copy.sh index bd847a61660b..be21f27434c9 100755 --- a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_00_load_compact_copy.sh +++ b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_00_load_compact_copy.sh @@ -10,7 +10,7 @@ for j in $(seq 1 1000);do printf "0123456789\n" >> "$DATA" done -echo "drop table if exists t1 all" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $MYSQL_CLIENT_CONNECT echo "CREATE TABLE t1 ( c0 string diff --git a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_01_load_compact_streaming_load.sh b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_01_load_compact_streaming_load.sh index 2b37912014b9..ef82801b4435 100755 --- a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_01_load_compact_streaming_load.sh +++ b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_01_load_compact_streaming_load.sh @@ -10,7 +10,7 @@ for j in $(seq 1 1000);do printf "0123456789\n" >> "$DATA" done -echo "drop table if exists t1 all" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $MYSQL_CLIENT_CONNECT echo "CREATE TABLE t1 ( c0 string diff --git a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_max_size.sh b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_max_size.sh index 
4f9c131b9a89..2c2a0c6ebda1 100755 --- a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_max_size.sh +++ b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_max_size.sh @@ -11,7 +11,7 @@ for j in $(seq 1 1000);do printf "0123456789\n" >> "$DATA" done -echo "drop table if exists t1 all" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $MYSQL_CLIENT_CONNECT echo "CREATE TABLE t1 ( c0 string diff --git a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_row_per_block.sh b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_row_per_block.sh index 97ee66da39e7..fb9a283df0b6 100755 --- a/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_row_per_block.sh +++ b/tests/suites/1_stateful/05_formats/05_01_compact/05_01_02_load_compact_copy_row_per_block.sh @@ -11,7 +11,7 @@ for j in $(seq 1 1000);do printf "0123456789\n" >> "$DATA" done -echo "drop table if exists t1 all" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists t1" | $MYSQL_CLIENT_CONNECT echo "CREATE TABLE t1 ( c0 string diff --git a/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh b/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh index 867c6206beb8..c26b7c7be883 100755 --- a/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh +++ b/tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh @@ -103,3 +103,4 @@ echo "drop database if exists test_vacuum_drop_4" | $MYSQL_CLIENT_CONNECT ## Drop table echo "drop table if exists table_drop_external_location;" | $MYSQL_CLIENT_CONNECT +echo "VACUUM DROP TABLE;" | $MYSQL_CLIENT_CONNECT From 2eba90aa4e9291eb28bab134faaac505ada62486 Mon Sep 17 00:00:00 2001 From: zhyass Date: Tue, 26 Sep 2023 22:54:54 +0800 Subject: [PATCH 5/9] fix test --- tests/suites/1_stateful/04_mini_dataset/04_0000_mini_ontime.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tests/suites/1_stateful/04_mini_dataset/04_0000_mini_ontime.sh b/tests/suites/1_stateful/04_mini_dataset/04_0000_mini_ontime.sh index 707acc4db0d0..acf4a58613f7 100755 --- a/tests/suites/1_stateful/04_mini_dataset/04_0000_mini_ontime.sh +++ b/tests/suites/1_stateful/04_mini_dataset/04_0000_mini_ontime.sh @@ -28,4 +28,4 @@ for i in "${ontime_statements[@]}"; do done ## Clean table -echo "drop table if exists ontime_mini all;" | $MYSQL_CLIENT_CONNECT +echo "drop table if exists ontime_mini;" | $MYSQL_CLIENT_CONNECT From 22b309c26a904cfa6c259616d98017ec35cd5c6a Mon Sep 17 00:00:00 2001 From: zhyass Date: Wed, 27 Sep 2023 01:26:21 +0800 Subject: [PATCH 6/9] fix docs --- .../00-ddl/20-table/10-ddl-create-table.md | 54 ++----------------- 1 file changed, 5 insertions(+), 49 deletions(-) diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md index ab001529cb40..ad8e730176a1 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md @@ -22,13 +22,12 @@ Databend aims to be easy to use by design and does NOT require any of those oper - [CREATE TABLE](#create-table): Creates a table from scratch. - [CREATE TABLE ... LIKE](#create-table--like): Creates a table with the same column definitions as an existing one. - [CREATE TABLE ... AS](#create-table--as): Creates a table and inserts data with the results of a SELECT query. -- [CREATE TRANSIENT TABLE](#create-transient-table): Creates a table without storing its historical data for Time Travel.. - [CREATE TABLE ... EXTERNAL_LOCATION](#create-table--external_location): Creates a table and specifies an S3 bucket for the data storage instead of the FUSE engine. 
## CREATE TABLE ```sql -CREATE [TRANSIENT] TABLE [IF NOT EXISTS] [db.]table_name +CREATE TABLE [IF NOT EXISTS] [db.]table_name ( [ NOT NULL | NULL] [ { DEFAULT }] [AS () STORED | VIRTUAL], [ NOT NULL | NULL] [ { DEFAULT }] [AS () STORED | VIRTUAL], @@ -54,14 +53,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name LIKE [db.]origin_table_name ``` -This command does not include any data or attributes (such as `CLUSTER BY`, `TRANSIENT`, and `COMPRESSION`) from the original table, and instead creates a new table using the default system settings. +This command does not include any data or attributes (such as `CLUSTER BY`, `COMPRESSION`) from the original table, and instead creates a new table using the default system settings. :::note WORKAROUND -- `TRANSIENT` and `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, +- `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, ```sql -create transient table t_new like t_old; - create table t_new compression='lz4' like t_old; ``` ::: @@ -76,31 +73,16 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS SELECT query ``` -This command does not include any attributes (such as CLUSTER BY, TRANSIENT, and COMPRESSION) from the original table, and instead creates a new table using the default system settings. +This command does not include any attributes (such as CLUSTER BY, COMPRESSION) from the original table, and instead creates a new table using the default system settings. :::note WORKAROUND -- `TRANSIENT` and `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, +- `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, ```sql -create transient table t_new as select * from t_old; - create table t_new compression='lz4' as select * from t_old; ``` ::: -## CREATE TRANSIENT TABLE - -Creates a transient table. 
- -Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Dataebend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the [AT](./../../20-query-syntax/03-query-at.md) clause in the SELECT statement will not work for transient tables. Please note that you can still [drop](./20-ddl-drop-table.md) and [undrop](./21-ddl-undrop-table.md) a transient table. - -Transient tables help save your storage expenses because they do not need extra space for historical data compared to non-transient tables. See [example](#create-transient-table-1) for detailed explanations. - -Syntax: -```sql -CREATE TRANSIENT TABLE ... -``` - ## CREATE TABLE ... EXTERNAL_LOCATION Creates a table and specifies an S3 bucket for the data storage instead of the FUSE engine. @@ -349,32 +331,6 @@ SELECT * FROM books_backup; +----+----------------+---------+ ``` -### Create Transient Table - -Create a transient table (temporary table) that automatically deletes data after a specified period of time: - -```sql --- Create a transient table -CREATE TRANSIENT TABLE visits ( - visitor_id BIGINT -); - --- Insert values -INSERT INTO visits VALUES(1); -INSERT INTO visits VALUES(2); -INSERT INTO visits VALUES(3); - --- Check the inserted data -SELECT * FROM visits; -+-----------+ -| visitor_id | -+-----------+ -| 1 | -| 2 | -| 3 | -+-----------+ -``` - ### Create Table ... 
External_Location Create a table with data stored on an external location, such as Amazon S3: From 1c5c610a2607a55ef5700a081f6e88ae67cb71c5 Mon Sep 17 00:00:00 2001 From: zhyass Date: Wed, 27 Sep 2023 01:37:03 +0800 Subject: [PATCH 7/9] fix test --- benchmark/clickbench/hits/clear.sql | 2 +- benchmark/clickbench/tpch/clear.sql | 2 +- benchmark/tpcds/load_data.sh | 2 +- scripts/benchmark/query/load/hits.sh | 2 +- tests/suites/5_ee/01_vacuum/01_0002_ee_vacuum_drop_table.sh | 1 - 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/benchmark/clickbench/hits/clear.sql b/benchmark/clickbench/hits/clear.sql index af216bba119f..81452994830f 100644 --- a/benchmark/clickbench/hits/clear.sql +++ b/benchmark/clickbench/hits/clear.sql @@ -1,2 +1,2 @@ drop table hits; -VACUUM DROP TABLE; +VACUUM DROP TABLE retain 0 hours; diff --git a/benchmark/clickbench/tpch/clear.sql b/benchmark/clickbench/tpch/clear.sql index 54c2f37aab34..60b4ace1ff06 100644 --- a/benchmark/clickbench/tpch/clear.sql +++ b/benchmark/clickbench/tpch/clear.sql @@ -6,4 +6,4 @@ drop table partsupp; drop table part; drop table region; drop table supplier; -VACUUM DROP TABLE; +VACUUM DROP TABLE retain 0 hours; diff --git a/benchmark/tpcds/load_data.sh b/benchmark/tpcds/load_data.sh index 521c63ea639a..ca34a816b25c 100755 --- a/benchmark/tpcds/load_data.sh +++ b/benchmark/tpcds/load_data.sh @@ -39,7 +39,7 @@ do echo "DROP TABLE IF EXISTS $t" | $MYSQL_CLIENT_CONNECT done -echo "VACUUM DROP TABLE" | $MYSQL_CLIENT_CONNECT +echo "VACUUM DROP TABLE retain 0 hours" | $MYSQL_CLIENT_CONNECT # Create Tables; cat "$CURDIR"/tpcds.sql | $MYSQL_CLIENT_CONNECT diff --git a/scripts/benchmark/query/load/hits.sh b/scripts/benchmark/query/load/hits.sh index 60e69bb9e5f3..2214529cd4d5 100755 --- a/scripts/benchmark/query/load/hits.sh +++ b/scripts/benchmark/query/load/hits.sh @@ -11,7 +11,7 @@ DROP TABLE IF EXISTS hits; SQL cat < Date: Wed, 27 Sep 2023 12:36:50 +0800 Subject: [PATCH 8/9] revert transient table --- 
benchmark/clickbench/hits/create.sql | 2 +- benchmark/clickbench/hits/create_local.sql | 2 +- .../00-ddl/20-table/10-ddl-create-table.md | 54 +++++++++++++++++-- .../00-ddl/20-table/70-flashback-table.md | 2 +- scripts/benchmark/query/load/hits.sh | 2 +- src/query/ast/src/ast/format/syntax/ddl.rs | 8 ++- src/query/ast/src/ast/statements/table.rs | 7 ++- src/query/ast/src/parser/statement.rs | 4 +- src/query/ast/src/parser/token.rs | 2 + src/query/ast/tests/it/testdata/statement.txt | 15 ++++++ .../interpreters/interpreter_table_create.rs | 2 + .../interpreter_table_show_create.rs | 3 ++ .../it/servers/http/http_query_handlers.rs | 2 +- .../it/storages/testdata/columns_table.txt | 2 + src/query/sql/src/planner/binder/ddl/table.rs | 6 +++ src/query/storages/fuse/src/fuse_table.rs | 4 ++ .../storages/fuse/src/operations/commit.rs | 7 +++ .../common/processors/sink_commit.rs | 37 ++++++++++++- src/query/storages/system/src/tables_table.rs | 12 +++++ src/tests/sqlsmith/src/sql_gen/ddl.rs | 1 + .../base/05_ddl/05_0000_ddl_create_tables | 2 +- .../09_fuse_engine/09_0017_transient_table | 40 ++++++++++++++ .../17_0003_alter_table_update.sh | 2 +- website/blog/2022-10-10-time-travel.md | 6 +++ 24 files changed, 208 insertions(+), 16 deletions(-) create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table diff --git a/benchmark/clickbench/hits/create.sql b/benchmark/clickbench/hits/create.sql index b446288b409e..836c55c728a0 100644 --- a/benchmark/clickbench/hits/create.sql +++ b/benchmark/clickbench/hits/create.sql @@ -1,4 +1,4 @@ -CREATE TABLE hits +CREATE TRANSIENT TABLE hits ( WatchID BIGINT NOT NULL, JavaEnable SMALLINT NOT NULL, diff --git a/benchmark/clickbench/hits/create_local.sql b/benchmark/clickbench/hits/create_local.sql index 8bf12bf2c65b..3c0c7d51ed66 100644 --- a/benchmark/clickbench/hits/create_local.sql +++ b/benchmark/clickbench/hits/create_local.sql @@ -1,4 +1,4 @@ -CREATE TABLE hits +CREATE TRANSIENT TABLE hits ( WatchID 
BIGINT NOT NULL, JavaEnable SMALLINT NOT NULL, diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md index ad8e730176a1..ab001529cb40 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/10-ddl-create-table.md @@ -22,12 +22,13 @@ Databend aims to be easy to use by design and does NOT require any of those oper - [CREATE TABLE](#create-table): Creates a table from scratch. - [CREATE TABLE ... LIKE](#create-table--like): Creates a table with the same column definitions as an existing one. - [CREATE TABLE ... AS](#create-table--as): Creates a table and inserts data with the results of a SELECT query. +- [CREATE TRANSIENT TABLE](#create-transient-table): Creates a table without storing its historical data for Time Travel.. - [CREATE TABLE ... EXTERNAL_LOCATION](#create-table--external_location): Creates a table and specifies an S3 bucket for the data storage instead of the FUSE engine. ## CREATE TABLE ```sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name +CREATE [TRANSIENT] TABLE [IF NOT EXISTS] [db.]table_name ( [ NOT NULL | NULL] [ { DEFAULT }] [AS () STORED | VIRTUAL], [ NOT NULL | NULL] [ { DEFAULT }] [AS () STORED | VIRTUAL], @@ -53,12 +54,14 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name LIKE [db.]origin_table_name ``` -This command does not include any data or attributes (such as `CLUSTER BY`, `COMPRESSION`) from the original table, and instead creates a new table using the default system settings. +This command does not include any data or attributes (such as `CLUSTER BY`, `TRANSIENT`, and `COMPRESSION`) from the original table, and instead creates a new table using the default system settings. :::note WORKAROUND -- `COMPRESSION` can be explicitly specified when you create a new table with this command. 
For example, +- `TRANSIENT` and `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, ```sql +create transient table t_new like t_old; + create table t_new compression='lz4' like t_old; ``` ::: @@ -73,16 +76,31 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name AS SELECT query ``` -This command does not include any attributes (such as CLUSTER BY, COMPRESSION) from the original table, and instead creates a new table using the default system settings. +This command does not include any attributes (such as CLUSTER BY, TRANSIENT, and COMPRESSION) from the original table, and instead creates a new table using the default system settings. :::note WORKAROUND -- `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, +- `TRANSIENT` and `COMPRESSION` can be explicitly specified when you create a new table with this command. For example, ```sql +create transient table t_new as select * from t_old; + create table t_new compression='lz4' as select * from t_old; ``` ::: +## CREATE TRANSIENT TABLE + +Creates a transient table. + +Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Databend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the [AT](./../../20-query-syntax/03-query-at.md) clause in the SELECT statement will not work for transient tables. Please note that you can still [drop](./20-ddl-drop-table.md) and [undrop](./21-ddl-undrop-table.md) a transient table. + +Transient tables help save your storage expenses because they do not need extra space for historical data compared to non-transient tables. See [example](#create-transient-table-1) for detailed explanations. + +Syntax: +```sql +CREATE TRANSIENT TABLE ... +``` + ## CREATE TABLE ... 
EXTERNAL_LOCATION Creates a table and specifies an S3 bucket for the data storage instead of the FUSE engine. @@ -331,6 +349,32 @@ SELECT * FROM books_backup; +----+----------------+---------+ ``` +### Create Transient Table + +Create a transient table, which does not retain historical data for Time Travel: + +```sql +-- Create a transient table +CREATE TRANSIENT TABLE visits ( + visitor_id BIGINT +); + +-- Insert values +INSERT INTO visits VALUES(1); +INSERT INTO visits VALUES(2); +INSERT INTO visits VALUES(3); + +-- Check the inserted data +SELECT * FROM visits; ++-----------+ +| visitor_id | ++-----------+ +| 1 | +| 2 | +| 3 | ++-----------+ +``` + ### Create Table ... External_Location Create a table with data stored on an external location, such as Amazon S3: diff --git a/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md b/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md index b5fade77c66f..be2dd8036e03 100644 --- a/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md +++ b/docs/doc/14-sql-commands/00-ddl/20-table/70-flashback-table.md @@ -10,7 +10,7 @@ The capability to flash back a table is subject to these conditions: - The command only existing tables to their prior states. To recover a dropped table, use [UNDROP TABLE](21-ddl-undrop-table.md). -- Flashback a table is part of Databend's time travel feature. Before using the command, make sure the table you want to flashback is eligible for time travel. +- Flashback a table is part of Databend's time travel feature. Before using the command, make sure the table you want to flashback is eligible for time travel. For example, the command doesn't work for transient tables because Databend does not create or store snapshots for such tables. - You cannot roll back after flashback a table to a prior state, but you can flash back the table again to an earlier state. 
diff --git a/scripts/benchmark/query/load/hits.sh b/scripts/benchmark/query/load/hits.sh index 2214529cd4d5..f0c6d2a99576 100755 --- a/scripts/benchmark/query/load/hits.sh +++ b/scripts/benchmark/query/load/hits.sh @@ -15,7 +15,7 @@ VACUUM DROP TABLE retain 0 hours; SQL cat < RcDoc<'static> { - RcDoc::text("CREATE TABLE") + RcDoc::text("CREATE") + .append(if stmt.transient { + RcDoc::space().append(RcDoc::text("TRANSIENT")) + } else { + RcDoc::nil() + }) + .append(RcDoc::space().append(RcDoc::text("TABLE"))) .append(if stmt.if_not_exists { RcDoc::space().append(RcDoc::text("IF NOT EXISTS")) } else { diff --git a/src/query/ast/src/ast/statements/table.rs b/src/query/ast/src/ast/statements/table.rs index d6e206eb074f..1dcdca0cbb46 100644 --- a/src/query/ast/src/ast/statements/table.rs +++ b/src/query/ast/src/ast/statements/table.rs @@ -131,11 +131,16 @@ pub struct CreateTableStmt { pub cluster_by: Vec, pub table_options: BTreeMap, pub as_query: Option>, + pub transient: bool, } impl Display for CreateTableStmt { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "CREATE TABLE ")?; + write!(f, "CREATE ")?; + if self.transient { + write!(f, "TRANSIENT ")?; + } + write!(f, "TABLE ")?; if self.if_not_exists { write!(f, "IF NOT EXISTS ")?; } diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index 8b245c2fbc62..1550b916ae44 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -536,7 +536,7 @@ pub fn statement(i: Input) -> IResult { ); let create_table = map( rule! { - CREATE ~ TABLE ~ ( IF ~ ^NOT ~ ^EXISTS )? + CREATE ~ TRANSIENT? ~ TABLE ~ ( IF ~ ^NOT ~ ^EXISTS )? ~ #dot_separated_idents_1_to_3 ~ #create_table_source? ~ ( #engine )? 
@@ -547,6 +547,7 @@ pub fn statement(i: Input) -> IResult { }, |( _, + opt_transient, _, opt_if_not_exists, (catalog, database, table), @@ -570,6 +571,7 @@ pub fn statement(i: Input) -> IResult { .unwrap_or_default(), table_options: opt_table_options.unwrap_or_default(), as_query: opt_as_query.map(|(_, query)| Box::new(query)), + transient: opt_transient.is_some(), }) }, ); diff --git a/src/query/ast/src/parser/token.rs b/src/query/ast/src/parser/token.rs index 5833b4f6696b..723188676413 100644 --- a/src/query/ast/src/parser/token.rs +++ b/src/query/ast/src/parser/token.rs @@ -900,6 +900,8 @@ pub enum TokenKind { TOKEN, #[token("TRAILING", ignore(ascii_case))] TRAILING, + #[token("TRANSIENT", ignore(ascii_case))] + TRANSIENT, #[token("TRIM", ignore(ascii_case))] TRIM, #[token("TRUE", ignore(ascii_case))] diff --git a/src/query/ast/tests/it/testdata/statement.txt b/src/query/ast/tests/it/testdata/statement.txt index 3451a0037665..d0202f907f47 100644 --- a/src/query/ast/tests/it/testdata/statement.txt +++ b/src/query/ast/tests/it/testdata/statement.txt @@ -592,6 +592,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -640,6 +641,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -719,6 +721,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -937,6 +940,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1077,6 +1081,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1197,6 +1202,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1252,6 +1258,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1293,6 +1300,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1361,6 +1369,7 @@ CreateTable( cluster_by: [], 
table_options: {}, as_query: None, + transient: false, }, ) @@ -1431,6 +1440,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -1786,6 +1796,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -2490,6 +2501,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -2567,6 +2579,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -2612,6 +2625,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) @@ -2668,6 +2682,7 @@ CreateTable( cluster_by: [], table_options: {}, as_query: None, + transient: false, }, ) diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index d8d6572fd150..f574ef453e3f 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -374,6 +374,8 @@ pub static CREATE_TABLE_OPTIONS: Lazy> = Lazy::new(|| { r.insert(OPT_KEY_COMMENT); r.insert(OPT_KEY_ENGINE); + + r.insert("transient"); r }); diff --git a/src/query/service/src/interpreters/interpreter_table_show_create.rs b/src/query/service/src/interpreters/interpreter_table_show_create.rs index bb6617be2cba..151d20fcd45b 100644 --- a/src/query/service/src/interpreters/interpreter_table_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_show_create.rs @@ -94,6 +94,9 @@ impl Interpreter for ShowCreateTableInterpreter { let n_fields = schema.fields().len(); let mut table_create_sql = format!("CREATE TABLE `{}` (\n", name); + if table.options().contains_key("TRANSIENT") { + table_create_sql = format!("CREATE TRANSIENT TABLE `{}` (\n", name) + } // Append columns. 
{ diff --git a/src/query/service/tests/it/servers/http/http_query_handlers.rs b/src/query/service/tests/it/servers/http/http_query_handlers.rs index 66a01b57679d..5ef40a3edde6 100644 --- a/src/query/service/tests/it/servers/http/http_query_handlers.rs +++ b/src/query/service/tests/it/servers/http/http_query_handlers.rs @@ -128,7 +128,7 @@ async fn test_simple_sql() -> Result<()> { assert_eq!(result.state, ExecuteStateKind::Succeeded, "{:?}", result); assert_eq!(result.next_uri, Some(final_uri.clone()), "{:?}", result); assert_eq!(result.data.len(), 10, "{:?}", result); - assert_eq!(result.schema.len(), 17, "{:?}", result); + assert_eq!(result.schema.len(), 18, "{:?}", result); // get state let uri = make_state_uri(query_id); diff --git a/src/query/service/tests/it/storages/testdata/columns_table.txt b/src/query/service/tests/it/storages/testdata/columns_table.txt index 7f8b98d7cde3..4e0d627aa1ed 100644 --- a/src/query/service/tests/it/storages/testdata/columns_table.txt +++ b/src/query/service/tests/it/storages/testdata/columns_table.txt @@ -144,6 +144,8 @@ DB.Table: 'system'.'columns', Table: columns-table_id:1, ver:0, Engine: SystemCo | 'is_insertable_into' | 'information_schema' | 'views' | 'Boolean' | 'BOOLEAN' | '' | '' | 'NO' | '' | | 'is_nullable' | 'information_schema' | 'columns' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | | 'is_nullable' | 'system' | 'columns' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | +| 'is_transient' | 'system' | 'tables' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | +| 'is_transient' | 'system' | 'tables_with_history' | 'String' | 'VARCHAR' | '' | '' | 'NO' | '' | | 'is_trigger_deletable' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | | 'is_trigger_insertable_into' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | | 'is_trigger_updatable' | 'information_schema' | 'views' | 'UInt8' | 'TINYINT UNSIGNED' | '' | '' | 'NO' | '' | diff --git 
a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index 043546fe05a5..655cf3fb4785 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -387,6 +387,7 @@ impl Binder { table_options, cluster_by, as_query, + transient, engine, uri_location, } = stmt; @@ -431,6 +432,11 @@ impl Binder { None => (None, "".to_string()), }; + // If table is TRANSIENT, set a flag in table option + if *transient { + options.insert("TRANSIENT".to_owned(), "T".to_owned()); + } + // Build table schema let (schema, field_comments) = match (&source, &as_query) { (Some(source), None) => { diff --git a/src/query/storages/fuse/src/fuse_table.rs b/src/query/storages/fuse/src/fuse_table.rs index 2410c479f309..71039e682160 100644 --- a/src/query/storages/fuse/src/fuse_table.rs +++ b/src/query/storages/fuse/src/fuse_table.rs @@ -338,6 +338,10 @@ impl FuseTable { }) } + pub fn transient(&self) -> bool { + self.table_info.meta.options.contains_key("TRANSIENT") + } + pub fn cluster_key_str(&self) -> Option<&String> { self.cluster_key_meta.as_ref().map(|(_, key)| key) } diff --git a/src/query/storages/fuse/src/operations/commit.rs b/src/query/storages/fuse/src/operations/commit.rs index 78b32ab80f9f..356902b52ea7 100644 --- a/src/query/storages/fuse/src/operations/commit.rs +++ b/src/query/storages/fuse/src/operations/commit.rs @@ -451,6 +451,13 @@ impl FuseTable { } } + #[inline] + pub fn is_error_recoverable(e: &ErrorCode, is_table_transient: bool) -> bool { + let code = e.code(); + code == ErrorCode::TABLE_VERSION_MISMATCHED + || (is_table_transient && code == ErrorCode::STORAGE_NOT_FOUND) + } + #[inline] pub fn no_side_effects_in_meta_store(e: &ErrorCode) -> bool { // currently, the only error that we know, which indicates there are no side effects diff --git a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs 
b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs index 3ac5406148f2..c2c36deeafb4 100644 --- a/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs +++ b/src/query/storages/fuse/src/operations/common/processors/sink_commit.rs @@ -29,6 +29,8 @@ use common_meta_app::schema::TableInfo; use common_meta_app::schema::UpsertTableCopiedFileReq; use log::debug; use log::error; +use log::info; +use log::warn; use opendal::Operator; use storages_common_table_meta::meta::ClusterKey; use storages_common_table_meta::meta::SnapshotId; @@ -83,6 +85,7 @@ pub struct CommitSink { table: Arc, copied_files: Option, snapshot_gen: F, + transient: bool, retries: u64, max_retry_elapsed: Option, backoff: ExponentialBackoff, @@ -118,6 +121,7 @@ where F: SnapshotGenerator + Send + 'static snapshot_gen, abort_operation: AbortOperation::default(), heartbeat: TableLockHeartbeat::default(), + transient: table.transient(), backoff: ExponentialBackoff::default(), retries: 0, max_retry_elapsed, @@ -134,7 +138,7 @@ where F: SnapshotGenerator + Send + 'static if self.prev_snapshot_id.is_some() && e.code() == ErrorCode::TABLE_VERSION_MISMATCHED { return false; } - FuseTable::no_side_effects_in_meta_store(e) + FuseTable::is_error_recoverable(e, self.transient) } fn read_meta(&mut self) -> Result { @@ -321,6 +325,37 @@ where F: SnapshotGenerator + Send + 'static .await { Ok(_) => { + if self.transient { + // Removes historical data, if table is transient + let latest = self.table.refresh(self.ctx.as_ref()).await?; + let tbl = FuseTable::try_from_table(latest.as_ref())?; + + warn!( + "transient table detected, purging historical data. 
({})", + tbl.table_info.ident + ); + + let keep_last_snapshot = true; + let snapshot_files = tbl.list_snapshot_files().await?; + if let Err(e) = tbl + .do_purge( + &self.ctx, + snapshot_files, + None, + keep_last_snapshot, + false, + ) + .await + { + // Errors of GC, if any, are ignored, since GC task can be picked up + warn!( + "GC of transient table not success (this is not a permanent error). the error : {}", + e + ); + } else { + info!("GC of transient table done"); + } + } metrics_inc_commit_mutation_success(); let duration = self.start_time.elapsed(); if let Some(files) = &self.copied_files { diff --git a/src/query/storages/system/src/tables_table.rs b/src/query/storages/system/src/tables_table.rs index 0e1cdec4d325..51849ac19493 100644 --- a/src/query/storages/system/src/tables_table.rs +++ b/src/query/storages/system/src/tables_table.rs @@ -263,6 +263,16 @@ where TablesTable: HistoryAware }) .collect(); let cluster_bys: Vec> = cluster_bys.iter().map(|s| s.as_bytes().to_vec()).collect(); + let is_transient: Vec> = database_tables + .iter() + .map(|v| { + if v.options().contains_key("TRANSIENT") { + "TRANSIENT".as_bytes().to_vec() + } else { + vec![] + } + }) + .collect(); Ok(DataBlock::new_from_columns(vec![ StringType::from_data(catalogs), StringType::from_data(databases), @@ -271,6 +281,7 @@ where TablesTable: HistoryAware StringType::from_data(engines), StringType::from_data(engines_full), StringType::from_data(cluster_bys), + StringType::from_data(is_transient), StringType::from_data(created_owns), StringType::from_data(dropped_owns), TimestampType::from_data(updated_on), @@ -297,6 +308,7 @@ where TablesTable: HistoryAware TableField::new("engine", TableDataType::String), TableField::new("engine_full", TableDataType::String), TableField::new("cluster_by", TableDataType::String), + TableField::new("is_transient", TableDataType::String), TableField::new("created_on", TableDataType::String), TableField::new("dropped_on", TableDataType::String), 
TableField::new("updated_on", TableDataType::Timestamp), diff --git a/src/tests/sqlsmith/src/sql_gen/ddl.rs b/src/tests/sqlsmith/src/sql_gen/ddl.rs index de7732ab5198..c91329af1d63 100644 --- a/src/tests/sqlsmith/src/sql_gen/ddl.rs +++ b/src/tests/sqlsmith/src/sql_gen/ddl.rs @@ -78,6 +78,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { cluster_by: vec![], table_options: BTreeMap::new(), as_query: None, + transient: false, }; tables.push((drop_table, create_table)); } diff --git a/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables b/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables index c2a7f5c96207..611907fe6191 100644 --- a/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables +++ b/tests/sqllogictests/suites/base/05_ddl/05_0000_ddl_create_tables @@ -195,7 +195,7 @@ map MAP(INT32, STRING) NO {} (empty) variant VARIANT NO (empty) (empty) statement ok -create table db2.test8(tiny TINYINT not null, tiny_unsigned TINYINT UNSIGNED not null, smallint SMALLINT not null, smallint_unsigned SMALLINT UNSIGNED not null, int INT not null, int_unsigned INT UNSIGNED not null, bigint BIGINT not null, bigint_unsigned BIGINT UNSIGNED not null,float FLOAT not null, double DOUBLE not null, date DATE not null, datetime DATETIME not null, ts TIMESTAMP not null, str VARCHAR not null default '3', bool BOOLEAN not null, arr ARRAY(VARCHAR) not null, tup TUPLE(DOUBLE, INT) not null, map MAP(STRING, Date) not null, variant VARIANT not null) +create transient table db2.test8(tiny TINYINT not null, tiny_unsigned TINYINT UNSIGNED not null, smallint SMALLINT not null, smallint_unsigned SMALLINT UNSIGNED not null, int INT not null, int_unsigned INT UNSIGNED not null, bigint BIGINT not null, bigint_unsigned BIGINT UNSIGNED not null,float FLOAT not null, double DOUBLE not null, date DATE not null, datetime DATETIME not null, ts TIMESTAMP not null, str VARCHAR not null default '3', bool BOOLEAN not null, arr ARRAY(VARCHAR) not null, tup TUPLE(DOUBLE, INT) not 
null, map MAP(STRING, Date) not null, variant VARIANT not null) query TTTTT desc db2.test8 diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table b/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table new file mode 100644 index 000000000000..52d2b201306b --- /dev/null +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0017_transient_table @@ -0,0 +1,40 @@ +statement ok +DROP DATABASE IF EXISTS db1 + +statement ok +CREATE DATABASE db1 + +statement ok +USE db1 + +statement ok +CREATE TRANSIENT TABLE IF NOT EXISTS t09_0016(a int) + +statement ok +INSERT INTO t09_0016 VALUES(1) + +statement ok +INSERT INTO t09_0016 VALUES(2) + +statement ok +INSERT INTO t09_0016 VALUES(3) + +query I +select * from t09_0016 order by a +---- +1 +2 +3 + +query B +select count(*)=1 from fuse_snapshot('db1', 't09_0016') +---- +1 + + + +statement ok +DROP TABLE t09_0016 + +statement ok +DROP DATABASE db1 diff --git a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh index e862a3bc85f8..583cfb563b3a 100755 --- a/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh +++ b/tests/suites/0_stateless/17_altertable/17_0003_alter_table_update.sh @@ -50,4 +50,4 @@ echo "update table column" echo "update t17_0003 set a=3 where a=1" | $MYSQL_CLIENT_CONNECT ## Drop table. 
-echo "drop table t17_0003" | $MYSQL_CLIENT_CONNECT \ No newline at end of file +echo "drop table t17_0003" | $MYSQL_CLIENT_CONNECT diff --git a/website/blog/2022-10-10-time-travel.md b/website/blog/2022-10-10-time-travel.md index 34d637ff396e..31fdcd821bab 100644 --- a/website/blog/2022-10-10-time-travel.md +++ b/website/blog/2022-10-10-time-travel.md @@ -40,3 +40,9 @@ The saved snapshots are the behind-the-scenes heroes that make the time travel b The Time Travel feature makes it possible to create an OLD table, which means you can create a table to hold and move on from a previous version of your data. The [CREATE TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table) statement can include a [SNAPSHOT_LOCATION](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table#create-table--snapshot_location) clause that allows you to specify a snapshot file that holds your old data. This command enables you to insert the data stored in the snapshot file when you create a table. Please note that the table you create must have same column definitions as the data from the snapshot. + +## Go without Time Travel + +Tables in Databend support Time Travel out-of-the-box. However, you might not need it for some cases, for example, when you're running low of your storage space or the data is big but unimportant. Databend currently does not provide a setting to switch it off, but you can [CREATE TRANSIENT TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table#create-transient-table). + +Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Databend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the AT clause in the SELECT statement will not work for transient tables. Please note that you can still drop and undrop a transient table. 
From cdc045eecc3f44d0f1d6196ea3d684ade52480bb Mon Sep 17 00:00:00 2001 From: zhyass Date: Wed, 27 Sep 2023 12:45:58 +0800 Subject: [PATCH 9/9] fix test --- src/query/ast/tests/it/testdata/statement.txt | 3 +++ website/blog/2022-10-10-time-travel.md | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/query/ast/tests/it/testdata/statement.txt b/src/query/ast/tests/it/testdata/statement.txt index d0202f907f47..8c97eef733f5 100644 --- a/src/query/ast/tests/it/testdata/statement.txt +++ b/src/query/ast/tests/it/testdata/statement.txt @@ -857,6 +857,7 @@ CreateTable( ignore_result: false, }, ), + transient: false, }, ) @@ -2775,6 +2776,7 @@ CreateTable( ignore_result: false, }, ), + transient: false, }, ) @@ -8457,6 +8459,7 @@ CreateTable( "comment": "table comment", }, as_query: None, + transient: false, }, ) diff --git a/website/blog/2022-10-10-time-travel.md b/website/blog/2022-10-10-time-travel.md index 31fdcd821bab..4316f1146d3e 100644 --- a/website/blog/2022-10-10-time-travel.md +++ b/website/blog/2022-10-10-time-travel.md @@ -45,4 +45,4 @@ The [CREATE TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-tab Tables in Databend support Time Travel out-of-the-box. However, you might not need it for some cases, for example, when you're running low of your storage space or the data is big but unimportant. Databend currently does not provide a setting to switch it off, but you can [CREATE TRANSIENT TABLE](https://databend.rs/doc/sql-commands/ddl/table/ddl-create-table#create-transient-table). -Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Databend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the AT clause in the SELECT statement will not work for transient tables. Please note that you can still drop and undrop a transient table. 
+Transient tables are used to hold transitory data that does not require a data protection or recovery mechanism. Databend does not hold historical data for a transient table so you will not be able to query from a previous version of the transient table with the Time Travel feature, for example, the AT clause in the SELECT statement will not work for transient tables. Please note that you can still drop and undrop a transient table. \ No newline at end of file