Merge remote-tracking branch 'refs/remotes/upstream/main' into bugfix/abs_ansi_mode
planga82 committed Jun 6, 2024
2 parents 19969d6 + 6143e7a commit 6fb873a
Showing 20 changed files with 1,353 additions and 518 deletions.
27 changes: 0 additions & 27 deletions .github/workflows/pr_build.yml
@@ -97,15 +97,6 @@ jobs:
with:
rust-version: ${{env.RUST_VERSION}}
jdk-version: ${{ matrix.java_version }}
- name: Clone Spark
uses: actions/checkout@v4
with:
repository: "apache/spark"
path: "apache-spark"
- name: Install Spark
shell: bash
working-directory: ./apache-spark
run: build/mvn install -Phive -Phadoop-cloud -DskipTests
- name: Java test steps
uses: ./.github/actions/java-test
with:
@@ -223,15 +214,6 @@ jobs:
with:
rust-version: ${{env.RUST_VERSION}}
jdk-version: ${{ matrix.java_version }}
- name: Clone Spark
uses: actions/checkout@v4
with:
repository: "apache/spark"
path: "apache-spark"
- name: Install Spark
shell: bash
working-directory: ./apache-spark
run: build/mvn install -Phive -Phadoop-cloud -DskipTests
- name: Java test steps
uses: ./.github/actions/java-test
with:
@@ -261,15 +243,6 @@ jobs:
jdk-version: ${{ matrix.java_version }}
jdk-architecture: aarch64
protoc-architecture: aarch_64
- name: Clone Spark
uses: actions/checkout@v4
with:
repository: "apache/spark"
path: "apache-spark"
- name: Install Spark
shell: bash
working-directory: ./apache-spark
run: build/mvn install -Phive -Phadoop-cloud -DskipTests
- name: Java test steps
uses: ./.github/actions/java-test
with:
3 changes: 3 additions & 0 deletions Makefile
@@ -77,6 +77,9 @@ release-linux: clean
release:
cd core && RUSTFLAGS="-Ctarget-cpu=native" cargo build --release
./mvnw install -Prelease -DskipTests $(PROFILES)
release-nogit:
cd core && RUSTFLAGS="-Ctarget-cpu=native" cargo build --features nightly --release
./mvnw install -Prelease -DskipTests $(PROFILES) -Dmaven.gitcommitid.skip=true
benchmark-%: clean release
cd spark && COMET_CONF_DIR=$(shell pwd)/conf MAVEN_OPTS='-Xmx20g' ../mvnw exec:java -Dexec.mainClass="$*" -Dexec.classpathScope="test" -Dexec.cleanupDaemonThreads="false" -Dexec.args="$(filter-out $@,$(MAKECMDGOALS))" $(PROFILES)
.DEFAULT:
1 change: 1 addition & 0 deletions core/src/execution/datafusion/expressions/mod.rs
@@ -40,6 +40,7 @@ pub mod strings;
pub mod subquery;
pub mod sum_decimal;
pub mod temporal;
pub mod unbound;
mod utils;
pub mod variance;

110 changes: 110 additions & 0 deletions core/src/execution/datafusion/expressions/unbound.rs
@@ -0,0 +1,110 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use crate::execution::datafusion::expressions::utils::down_cast_any_ref;
use arrow_array::RecordBatch;
use arrow_schema::{DataType, Schema};
use datafusion::physical_plan::ColumnarValue;
use datafusion_common::{internal_err, Result};
use datafusion_physical_expr::PhysicalExpr;
use std::{
    any::Any,
    hash::{Hash, Hasher},
    sync::Arc,
};

/// This is similar to `UnKnownColumn` in DataFusion, but it carries a data type.
/// It is only used when a column is not bound to a schema, for example, the
/// inputs to aggregation functions in a final aggregation. In that case, we cannot
/// bind the aggregation functions to the input schema, which in Spark consists of
/// grouping columns and aggregate buffer attributes (DataFusion has a different
/// design). However, when creating certain aggregation functions, we need to know
/// their input data types. As `UnKnownColumn` doesn't carry a data type, we
/// implement `UnboundColumn` to do so.
#[derive(Debug, Hash, PartialEq, Eq, Clone)]
pub struct UnboundColumn {
    name: String,
    datatype: DataType,
}

impl UnboundColumn {
    /// Create a new unbound column expression
    pub fn new(name: &str, datatype: DataType) -> Self {
        Self {
            name: name.to_owned(),
            datatype,
        }
    }

    /// Get the column name
    pub fn name(&self) -> &str {
        &self.name
    }
}

impl std::fmt::Display for UnboundColumn {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}, datatype: {}", self.name, self.datatype)
    }
}

impl PhysicalExpr for UnboundColumn {
    /// Return a reference to Any that can be used for downcasting
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    /// Get the data type of this expression, given the schema of the input
    fn data_type(&self, _input_schema: &Schema) -> Result<DataType> {
        Ok(self.datatype.clone())
    }

    /// Decide whether this expression is nullable, given the schema of the input
    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        Ok(true)
    }

    /// Evaluate the expression
    fn evaluate(&self, _batch: &RecordBatch) -> Result<ColumnarValue> {
        internal_err!("UnboundColumn::evaluate() should not be called")
    }

    fn children(&self) -> Vec<Arc<dyn PhysicalExpr>> {
        vec![]
    }

    fn with_new_children(
        self: Arc<Self>,
        _children: Vec<Arc<dyn PhysicalExpr>>,
    ) -> Result<Arc<dyn PhysicalExpr>> {
        Ok(self)
    }

    fn dyn_hash(&self, state: &mut dyn Hasher) {
        let mut s = state;
        self.hash(&mut s);
    }
}

impl PartialEq<dyn Any> for UnboundColumn {
    fn eq(&self, other: &dyn Any) -> bool {
        down_cast_any_ref(other)
            .downcast_ref::<Self>()
            .map(|x| self == x)
            .unwrap_or(false)
    }
}
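For orientation, here is a minimal usage sketch (not part of the commit; the column name, data type, and schema are illustrative) that exercises the `PhysicalExpr` methods defined above:

```rust
// A sketch only: assumes `UnboundColumn` from the file above is in scope.
use std::sync::Arc;

use arrow_array::RecordBatch;
use arrow_schema::{DataType, Field, Schema};
use datafusion_common::Result;
use datafusion_physical_expr::PhysicalExpr;

fn unbound_column_example() -> Result<()> {
    // The data type travels with the expression instead of being resolved from a schema.
    let col = UnboundColumn::new("agg_buf", DataType::Decimal128(20, 2));

    // Any input schema works for `data_type`/`nullable`; the column is never looked up in it.
    let schema = Schema::new(vec![Field::new("group_key", DataType::Int32, false)]);
    assert_eq!(col.data_type(&schema)?, DataType::Decimal128(20, 2));
    assert!(col.nullable(&schema)?);

    // Evaluation is an internal error by design: the expression is only a typed placeholder.
    let batch = RecordBatch::new_empty(Arc::new(schema));
    assert!(col.evaluate(&batch).is_err());

    Ok(())
}
```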
11 changes: 9 additions & 2 deletions core/src/execution/datafusion/planner.rs
@@ -32,7 +32,7 @@ use datafusion::{
expressions::{
in_list, BinaryExpr, BitAnd, BitOr, BitXor, CaseExpr, CastExpr, Column, Count,
FirstValue, InListExpr, IsNotNullExpr, IsNullExpr, LastValue,
Literal as DataFusionLiteral, Max, Min, NotExpr, Sum, UnKnownColumn,
Literal as DataFusionLiteral, Max, Min, NotExpr, Sum,
},
AggregateExpr, PhysicalExpr, PhysicalSortExpr, ScalarFunctionExpr,
},
@@ -78,6 +78,7 @@ use crate::{
subquery::Subquery,
sum_decimal::SumDecimal,
temporal::{DateTruncExec, HourExec, MinuteExec, SecondExec, TimestampTruncExec},
unbound::UnboundColumn,
variance::Variance,
NormalizeNaNAndZero,
},
@@ -241,7 +242,13 @@ impl PhysicalPlanner {
let field = input_schema.field(idx);
Ok(Arc::new(Column::new(field.name().as_str(), idx)))
}
ExprStruct::Unbound(unbound) => Ok(Arc::new(UnKnownColumn::new(unbound.name.as_str()))),
ExprStruct::Unbound(unbound) => {
let data_type = to_arrow_datatype(unbound.datatype.as_ref().unwrap());
Ok(Arc::new(UnboundColumn::new(
unbound.name.as_str(),
data_type,
)))
}
ExprStruct::IsNotNull(is_notnull) => {
let child = self.create_expr(is_notnull.child.as_ref().unwrap(), input_schema)?;
Ok(Arc::new(IsNotNullExpr::new(child)))
6 changes: 6 additions & 0 deletions docs/source/user-guide/installation.md
@@ -57,6 +57,12 @@ Note that the project builds for Scala 2.12 by default but can be built for Scala 2.13
make release PROFILES="-Pspark-3.4 -Pscala-2.13"
```

To build Comet from the source distribution in an isolated environment without access to `github.com`, you need to disable `git-commit-id-maven-plugin`; otherwise the build will fail because it cannot reach the git repository. In that case you can use:

```console
make release-nogit PROFILES="-Pspark-3.4"
```
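
Based on the `release-nogit` target added to the Makefile in this commit, this is roughly equivalent to running the following from the repository root:

```console
(cd core && RUSTFLAGS="-Ctarget-cpu=native" cargo build --features nightly --release)
./mvnw install -Prelease -DskipTests -Pspark-3.4 -Dmaven.gitcommitid.skip=true
```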

## Run Spark Shell with Comet enabled

Make sure `SPARK_HOME` points to the same Spark version as Comet was built for.
2 changes: 1 addition & 1 deletion docs/source/user-guide/overview.md
@@ -29,7 +29,7 @@ Comet aims to support:
- a native Parquet implementation, including both reader and writer
- full implementation of Spark operators, including
Filter/Project/Aggregation/Join/Exchange etc.
- full implementation of Spark built-in expressions
- full implementation of Spark built-in expressions.
- a UDF framework for users to migrate their existing UDF to native

## Architecture
2 changes: 2 additions & 0 deletions docs/source/user-guide/tuning.md
@@ -39,6 +39,8 @@ It must be set before the Spark context is created. You can enable or disable Comet shuffle
at runtime by setting `spark.comet.exec.shuffle.enabled` to `true` or `false`.
Once it is disabled, Comet will fall back to the default Spark shuffle manager.

> **_NOTE:_** At the moment Comet Shuffle is not compatible with Spark AQE partition coalescing. To disable it, set `spark.sql.adaptive.coalescePartitions.enabled` to `false`.
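
For example (an illustration only; the rest of your Spark configuration is unchanged), the setting can be passed when launching the shell:

```console
$SPARK_HOME/bin/spark-shell --conf spark.sql.adaptive.coalescePartitions.enabled=false
```
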
### Shuffle Mode

Comet provides three shuffle modes: Columnar Shuffle, Native Shuffle and Auto Mode.