Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: hilbert clustering #17045

Draft
wants to merge 12 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,7 @@ databend-enterprise-attach-table = { path = "src/query/ee_features/attach_table"
databend-enterprise-background-service = { path = "src/query/ee_features/background_service" }
databend-enterprise-data-mask-feature = { path = "src/query/ee_features/data_mask" }
databend-enterprise-fail-safe = { path = "src/query/ee_features/fail_safe" }
databend-enterprise-hilbert-clustering = { path = "src/query/ee_features/hilbert_clustering" }
databend-enterprise-inverted-index = { path = "src/query/ee_features/inverted_index" }
databend-enterprise-meta = { path = "src/meta/ee" }
databend-enterprise-query = { path = "src/query/ee" }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ impl<Labels: FamilyLabels> FamilyCounter<Labels> {
FamilyCounter {
index,
labels,
value: Arc::new(Default::default()),
value: Default::default(),
}
}

Expand Down
3 changes: 2 additions & 1 deletion src/common/exception/src/exception_code.rs
Original file line number Diff line number Diff line change
Expand Up @@ -419,8 +419,9 @@ build_exceptions! {
// recluster error codes
NoNeedToRecluster(4011),
NoNeedToCompact(4012),
UnsupportedClusterType(4013),

RefreshTableInfoFailure(4012),
RefreshTableInfoFailure(4021),
}

// Service errors [5001,6000].
Expand Down
14 changes: 12 additions & 2 deletions src/common/license/src/license.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,8 @@ pub enum Feature {
StorageQuota(StorageQuota),
#[serde(alias = "amend_table", alias = "AMEND_TABLE")]
AmendTable,
#[serde(alias = "hilbert_clustering", alias = "HILBERT_CLUSTERING")]
HilbertClustering,
#[serde(other)]
Unknown,
}
Expand Down Expand Up @@ -119,6 +121,7 @@ impl fmt::Display for Feature {
write!(f, ")")
}
Feature::AmendTable => write!(f, "amend_table"),
Feature::HilbertClustering => write!(f, "hilbert_clustering"),
Feature::Unknown => write!(f, "unknown"),
}
}
Expand Down Expand Up @@ -166,7 +169,8 @@ impl Feature {
| (Feature::InvertedIndex, Feature::InvertedIndex)
| (Feature::VirtualColumn, Feature::VirtualColumn)
| (Feature::AttacheTable, Feature::AttacheTable)
| (Feature::StorageEncryption, Feature::StorageEncryption) => Ok(true),
| (Feature::StorageEncryption, Feature::StorageEncryption)
| (Feature::HilbertClustering, Feature::HilbertClustering) => Ok(true),
(_, _) => Ok(false),
}
}
Expand Down Expand Up @@ -334,6 +338,11 @@ mod tests {
serde_json::from_str::<Feature>("\"amend_table\"").unwrap()
);

assert_eq!(
Feature::HilbertClustering,
serde_json::from_str::<Feature>("\"hilbert_clustering\"").unwrap()
);

assert_eq!(
Feature::Unknown,
serde_json::from_str::<Feature>("\"ssss\"").unwrap()
Expand Down Expand Up @@ -367,11 +376,12 @@ mod tests {
storage_usage: Some(1),
}),
Feature::AmendTable,
Feature::HilbertClustering,
]),
};

assert_eq!(
"LicenseInfo{ type: enterprise, org: databend, tenants: [databend_tenant,foo], features: [aggregate_index,amend_table,attach_table,background_service,compute_quota(threads_num: 1, memory_usage: 1),computed_column,data_mask,inverted_index,license_info,storage_encryption,storage_quota(storage_usage: 1),stream,vacuum,virtual_column] }",
"LicenseInfo{ type: enterprise, org: databend, tenants: [databend_tenant,foo], features: [aggregate_index,amend_table,attach_table,background_service,compute_quota(threads_num: 1, memory_usage: 1),computed_column,data_mask,hilbert_clustering,inverted_index,license_info,storage_encryption,storage_quota(storage_usage: 1),stream,vacuum,virtual_column] }",
license_info.to_string()
);
}
Expand Down
8 changes: 8 additions & 0 deletions src/query/catalog/src/plan/partition.rs
Original file line number Diff line number Diff line change
Expand Up @@ -391,3 +391,11 @@ impl ReclusterParts {
}
}
}

// TODO refine this
/// Side-car payload carried alongside a recluster operation, describing the
/// blocks produced and the segments/statistics removed by the pass.
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default)]
pub struct ReclusterInfoSideCar {
// Block metas produced by the recluster merge — presumably the newly
// written blocks to be committed; TODO confirm against the recluster pipeline.
pub merged_blocks: Vec<Arc<BlockMeta>>,
// Indexes (into the snapshot's segment list) of segments superseded by the
// recluster and slated for removal.
pub removed_segment_indexes: Vec<usize>,
// Aggregated statistics of the removed segments, used to adjust the
// snapshot summary — NOTE(review): assumed, verify at the commit site.
pub removed_statistics: Statistics,
}
76 changes: 43 additions & 33 deletions src/query/catalog/src/table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,20 +14,25 @@

use std::any::Any;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::sync::Arc;

use chrono::DateTime;
use chrono::Utc;
use databend_common_ast::ast::Expr;
use databend_common_ast::parser::parse_comma_separated_exprs;
use databend_common_ast::parser::tokenize_sql;
use databend_common_exception::ErrorCode;
use databend_common_exception::Result;
use databend_common_expression::BlockThresholds;
use databend_common_expression::ColumnId;
use databend_common_expression::RemoteExpr;
use databend_common_expression::Scalar;
use databend_common_expression::TableSchema;
use databend_common_io::constants::DEFAULT_BLOCK_BUFFER_SIZE;
use databend_common_io::constants::DEFAULT_BLOCK_MAX_ROWS;
use databend_common_io::constants::DEFAULT_BLOCK_MIN_ROWS;
use databend_common_meta_app::app_error::AppError;
use databend_common_meta_app::app_error::UnknownTableId;
use databend_common_meta_app::schema::TableIdent;
use databend_common_meta_app::schema::TableInfo;
use databend_common_meta_app::schema::TableMeta;
Expand All @@ -37,9 +42,12 @@ use databend_common_meta_types::MetaId;
use databend_common_pipeline_core::Pipeline;
use databend_common_storage::Histogram;
use databend_common_storage::StorageMetrics;
use databend_storages_common_table_meta::meta::ClusterKey;
use databend_storages_common_table_meta::meta::SnapshotId;
use databend_storages_common_table_meta::meta::TableSnapshot;
use databend_storages_common_table_meta::table::ChangeType;
use databend_storages_common_table_meta::table::ClusterType;
use databend_storages_common_table_meta::table::OPT_KEY_CLUSTER_TYPE;
use databend_storages_common_table_meta::table::OPT_KEY_TEMP_PREFIX;
use databend_storages_common_table_meta::table_id_ranges::is_temp_table_id;

Expand Down Expand Up @@ -117,8 +125,40 @@ pub trait Table: Sync + Send {
false
}

fn cluster_keys(&self, _ctx: Arc<dyn TableContext>) -> Vec<RemoteExpr<String>> {
vec![]
/// Cluster-key metadata of this table, if one is defined.
///
/// Default implementation returns `None` (table engine has no cluster key);
/// engines supporting clustering override this.
fn cluster_key_meta(&self) -> Option<ClusterKey> {
None
}

/// The clustering algorithm configured for this table.
///
/// Returns `None` when the table has no cluster key at all. Otherwise the
/// `OPT_KEY_CLUSTER_TYPE` table option is consulted; an absent or unparsable
/// value falls back to [`ClusterType::Linear`].
fn cluster_type(&self) -> Option<ClusterType> {
// No cluster key defined => no cluster type.
self.cluster_key_meta().map(|_| {
match self.options().get(OPT_KEY_CLUSTER_TYPE) {
Some(raw) => raw.parse::<ClusterType>().unwrap_or(ClusterType::Linear),
None => ClusterType::Linear,
}
})
}

/// Parse the table's stored cluster-key definition string back into AST
/// expressions, using the session's SQL dialect.
///
/// Returns `None` when no cluster key is defined. A composite key stored as
/// a single tuple expression `(a, b, ...)` is unwrapped into its components.
///
/// NOTE(review): `tokenize_sql` / `parse_comma_separated_exprs` results are
/// `unwrap()`ed — this assumes the stored string was validated when the
/// cluster key was set; a corrupted value would panic here. TODO confirm.
fn resolve_cluster_keys(&self, ctx: Arc<dyn TableContext>) -> Option<Vec<Expr>> {
let Some((_, cluster_key_str)) = &self.cluster_key_meta() else {
return None;
};
let tokens = tokenize_sql(cluster_key_str).unwrap();
// Fall back to the default dialect if the setting cannot be read.
let sql_dialect = ctx.get_settings().get_sql_dialect().unwrap_or_default();
let mut ast_exprs = parse_comma_separated_exprs(&tokens, sql_dialect).unwrap();
// unwrap tuple.
if ast_exprs.len() == 1 {
if let Expr::Tuple { exprs, .. } = &ast_exprs[0] {
ast_exprs = exprs.clone();
}
} else {
// Defensive check:
// `ast_exprs` should always contain one element which can be one of the following:
// 1. A tuple of composite cluster keys
// 2. A single cluster key
// NOTE(review): this also panics for zero elements — presumably an empty
// cluster-key string can never be stored; verify at the setter side.
unreachable!("invalid cluster key ast expression, {:?}", ast_exprs);
}
Some(ast_exprs)
}

fn change_tracking_enabled(&self) -> bool {
Expand Down Expand Up @@ -159,31 +199,6 @@ pub trait Table: Sync + Send {
false
}

#[async_backtrace::framed]
async fn alter_table_cluster_keys(
&self,
ctx: Arc<dyn TableContext>,
cluster_key: String,
cluster_type: String,
) -> Result<()> {
let (_, _, _) = (ctx, cluster_key, cluster_type);

Err(ErrorCode::UnsupportedEngineParams(format!(
"Altering table cluster keys is not supported for the '{}' engine.",
self.engine()
)))
}

#[async_backtrace::framed]
async fn drop_table_cluster_keys(&self, ctx: Arc<dyn TableContext>) -> Result<()> {
let _ = ctx;

Err(ErrorCode::UnsupportedEngineParams(format!(
"Dropping table cluster keys is not supported for the '{}' engine.",
self.engine()
)))
}

/// Gather partitions to be scanned according to the push_downs
#[async_backtrace::framed]
async fn read_partitions(
Expand Down Expand Up @@ -584,11 +599,6 @@ pub struct NavigationDescriptor {
pub point: NavigationPoint,
}

use std::collections::HashMap;

use databend_common_meta_app::app_error::AppError;
use databend_common_meta_app::app_error::UnknownTableId;

#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default)]
pub struct ParquetTableColumnStatisticsProvider {
column_stats: HashMap<ColumnId, Option<BasicColumnStatistics>>,
Expand Down
18 changes: 15 additions & 3 deletions src/query/catalog/src/table_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -279,11 +279,23 @@ pub trait TableContext: Send + Sync {
max_files: Option<usize>,
) -> Result<FilteredCopyFiles>;

fn add_segment_location(&self, segment_loc: Location) -> Result<()>;
fn add_inserted_segment_location(&self, segment_loc: Location) -> Result<()>;

fn clear_segment_locations(&self) -> Result<()>;
fn clear_inserted_segment_locations(&self) -> Result<()>;

fn get_segment_locations(&self) -> Result<Vec<Location>>;
fn get_inserted_segment_locations(&self) -> Result<Vec<Location>>;

/// Record a segment location selected for processing — presumably by the
/// hilbert-clustering / recluster path this PR adds; TODO confirm callers.
/// Default: panics; only contexts supporting selection override this.
fn add_selected_segment_location(&self, _segment_loc: Location) {
unimplemented!()
}

/// Return all segment locations previously recorded via
/// `add_selected_segment_location`.
/// Default: panics; only contexts supporting selection override this.
fn get_selected_segment_locations(&self) -> Vec<Location> {
unimplemented!()
}

/// Discard all recorded selected segment locations.
/// Default: panics; only contexts supporting selection override this.
fn clear_selected_segment_locations(&self) {
unimplemented!()
}

fn add_file_status(&self, file_path: &str, file_status: FileStatus) -> Result<()>;

Expand Down
1 change: 1 addition & 0 deletions src/query/ee/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ databend-enterprise-attach-table = { workspace = true }
databend-enterprise-background-service = { workspace = true }
databend-enterprise-data-mask-feature = { workspace = true }
databend-enterprise-fail-safe = { workspace = true }
databend-enterprise-hilbert-clustering = { workspace = true }
databend-enterprise-inverted-index = { workspace = true }
databend-enterprise-storage-encryption = { workspace = true }
databend-enterprise-storage-quota = { workspace = true }
Expand Down
2 changes: 2 additions & 0 deletions src/query/ee/src/enterprise_services.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ use crate::attach_table::RealAttachTableHandler;
use crate::background_service::RealBackgroundService;
use crate::data_mask::RealDatamaskHandler;
use crate::fail_safe::RealFailSafeHandler;
use crate::hilbert_clustering::RealHilbertClusteringHandler;
use crate::inverted_index::RealInvertedIndexHandler;
use crate::license::license_mgr::RealLicenseManager;
use crate::storage_encryption::RealStorageEncryptionHandler;
Expand All @@ -45,6 +46,7 @@ impl EnterpriseServices {
RealInvertedIndexHandler::init()?;
RealStorageQuotaHandler::init(&cfg)?;
RealFailSafeHandler::init()?;
RealHilbertClusteringHandler::init()?;
Ok(())
}
}
Loading
Loading