
Commit: fix style
huaxingao committed Jun 27, 2024
1 parent e575d9c commit 1b174a0
Showing 2 changed files with 7 additions and 7 deletions.
10 changes: 5 additions & 5 deletions core/src/execution/datafusion/planner.rs
@@ -54,7 +54,7 @@ use datafusion_common::{
 };
 use datafusion_expr::expr::find_df_window_func;
 use datafusion_expr::{
-    ScalarUDF, WindowFrame, WindowFrameBound, WindowFrameUnits, WindowFunctionDefinition,
+    ScalarUDF, WindowFrame, WindowFrameBound, WindowFrameUnits,
 };
 use datafusion_physical_expr::window::WindowExpr;
 use datafusion_physical_expr_common::aggregate::create_aggregate_expr;
@@ -1380,12 +1380,12 @@ impl PhysicalPlanner {
         partition_by: &[Arc<dyn PhysicalExpr>],
         sort_exprs: &[PhysicalSortExpr],
     ) -> Result<Arc<dyn WindowExpr>, ExecutionError> {
-        let (window_func_name, window_func_args);
+        let (mut window_func_name, mut window_func_args) = (String::new(), Vec::new());
         if let Some(func) = &spark_expr.built_in_window_function {
             match &func.expr_struct {
                 Some(ExprStruct::ScalarFunc(f)) => {
-                    window_func_name = f.func.clone();
-                    window_func_args = f.args.clone();
+                    window_func_name.clone_from(&f.func);
+                    window_func_args.clone_from(&f.args);
                 }
                 other => {
                     return Err(ExecutionError::GeneralError(format!(
@@ -1518,7 +1518,7 @@ impl PhysicalPlanner {
                 Ok(("max".to_string(), optional_expr_to_vec(&expr.child)))
             }
             other => {
-                return Err(ExecutionError::GeneralError(format!(
+                Err(ExecutionError::GeneralError(format!(
                     "{other:?} not supported for window function"
                 )))
             }
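For context, the planner.rs changes are pure style fixes: `clone_from` overwrites the destination in place and can reuse its existing allocation instead of building a fresh clone and dropping the old value (Clippy's assigning_clones lint), and a match arm in tail position can yield `Err(...)` directly without `return` (Clippy's needless_return lint). Below is a minimal, self-contained Rust sketch of both patterns; the `ScalarFunc` struct and `pick_window_func` function are invented for illustration and are not the Comet planner code.

#[derive(Debug)]
struct ScalarFunc {
    func: String,
    args: Vec<i64>,
}

// Hypothetical helper, not part of Comet: mirrors the shape of the changed code.
fn pick_window_func(f: Option<&ScalarFunc>) -> Result<(String, Vec<i64>), String> {
    let (mut window_func_name, mut window_func_args) = (String::new(), Vec::new());
    match f {
        Some(f) => {
            // `clone_from` copies into the existing String/Vec, reusing their
            // buffers where possible, instead of `window_func_name = f.func.clone();`.
            window_func_name.clone_from(&f.func);
            window_func_args.clone_from(&f.args);
            Ok((window_func_name, window_func_args))
        }
        other => {
            // Tail expression of the arm and of the function: no `return` needed.
            Err(format!("{other:?} not supported for window function"))
        }
    }
}

fn main() {
    let max = ScalarFunc { func: "max".to_string(), args: vec![1, 2] };
    println!("{:?}", pick_window_func(Some(&max))); // Ok(("max", [1, 2]))
    println!("{:?}", pick_window_func(None));       // Err("None not supported ...")
}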
4 changes: 2 additions & 2 deletions CometWindowExec.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Expression,
 import org.apache.spark.sql.catalyst.plans.physical.Partitioning
 import org.apache.spark.sql.comet.CometWindowExec.getNativePlan
 import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
-import org.apache.spark.sql.execution.metric.{SQLMetrics, SQLShuffleReadMetricsReporter, SQLShuffleWriteMetricsReporter}
+import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics, SQLShuffleReadMetricsReporter, SQLShuffleWriteMetricsReporter}
 import org.apache.spark.sql.vectorized.ColumnarBatch

 import org.apache.comet.serde.OperatorOuterClass
@@ -57,7 +57,7 @@ case class CometWindowExec(
     SQLShuffleWriteMetricsReporter.createShuffleWriteMetrics(sparkContext)
   private lazy val readMetrics =
     SQLShuffleReadMetricsReporter.createShuffleReadMetrics(sparkContext)
-  override lazy val metrics = Map(
+  override lazy val metrics: Map[String, SQLMetric] = Map(
     "dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"),
     "shuffleReadElapsedCompute" ->
       SQLMetrics.createNanoTimingMetric(sparkContext, "shuffle read elapsed compute at native"),
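The Scala side of the commit only adds an explicit result type to the overridden `metrics` member, importing `SQLMetric` so the type can be named. Spelling out the type on a public override documents the API and keeps the inferred type from drifting if the right-hand side changes. A minimal sketch of the pattern, using made-up classes rather than Spark's `SQLMetric` machinery:

trait MetricsNode {
  def metrics: Map[String, Long]
}

class WindowNode extends MetricsNode {
  // Explicit type annotation on a public override, analogous to
  // `override lazy val metrics: Map[String, SQLMetric] = Map(...)`.
  override lazy val metrics: Map[String, Long] = Map(
    "dataSize" -> 0L,
    "shuffleReadElapsedCompute" -> 0L)
}

object Demo extends App {
  println(new WindowNode().metrics)
}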
