From df94429cff95e4ef2acda035d2fa801fdbd35dd5 Mon Sep 17 00:00:00 2001
From: Xianjin YE
Date: Mon, 4 Mar 2024 23:50:45 +0800
Subject: [PATCH] chore: Fix warnings in both compiler and test environments

---
 .../src/main/java/org/apache/comet/parquet/BatchReader.java   | 1 +
 core/src/lib.rs                                               | 3 ++-
 pom.xml                                                       | 4 ++--
 .../scala/org/apache/comet/CometSparkSessionExtensions.scala  | 2 +-
 .../main/scala/org/apache/comet/serde/QueryPlanSerde.scala    | 4 ++--
 .../org/apache/spark/sql/comet/CometCollectLimitExec.scala    | 4 ++--
 .../src/test/scala/org/apache/comet/exec/CometExecSuite.scala | 1 -
 .../test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala | 2 +-
 8 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/common/src/main/java/org/apache/comet/parquet/BatchReader.java b/common/src/main/java/org/apache/comet/parquet/BatchReader.java
index 87302b372..9940390dc 100644
--- a/common/src/main/java/org/apache/comet/parquet/BatchReader.java
+++ b/common/src/main/java/org/apache/comet/parquet/BatchReader.java
@@ -517,6 +517,7 @@ public void close() throws IOException {
     }
   }
 
+  @SuppressWarnings("deprecation")
   private boolean loadNextRowGroupIfNecessary() throws Throwable {
     // More rows can be read from loaded row group. No need to load next one.
     if (rowsRead != totalRowsLoaded) return true;
diff --git a/core/src/lib.rs b/core/src/lib.rs
index d10478885..2e8513620 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -34,7 +34,7 @@ use jni::{
 };
 use log::{info, LevelFilter};
 use log4rs::{
-    append::console::ConsoleAppender,
+    append::console::{ConsoleAppender, Target},
     config::{load_config_file, Appender, Deserializers, Root},
     encode::pattern::PatternEncoder,
     Config,
@@ -99,6 +99,7 @@ const LOG_PATTERN: &str = "{d(%y/%m/%d %H:%M:%S)} {l} {f}: {m}{n}";
 // Creates a default log4rs config, which logs to console with `INFO` level.
 fn default_logger_config() -> CometResult<Config> {
     let console_append = ConsoleAppender::builder()
+        .target(Target::Stderr)
         .encoder(Box::new(PatternEncoder::new(LOG_PATTERN)))
         .build();
     let appender = Appender::builder().build("console", Box::new(console_append));
diff --git a/pom.xml b/pom.xml
index aa59d19aa..657eb977d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -711,9 +711,9 @@ under the License.
             <artifactId>maven-surefire-plugin</artifactId>
            <version>3.1.0</version>
             <configuration>
-              <systemProperties>
+              <systemPropertyVariables>
                 <log4j.configurationFile>file:src/test/resources/log4j2.properties</log4j.configurationFile>
-              </systemProperties>
+              </systemPropertyVariables>
               <argLine>-ea -Xmx4g -Xss4m ${extraJavaTestArgs}</argLine>
diff --git a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
index 8037f5559..87c2265fc 100644
--- a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
+++ b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
@@ -115,7 +115,7 @@ class CometSparkSessionExtensions
         // data source V1
         case scanExec @ FileSourceScanExec(
               HadoopFsRelation(_, partitionSchema, _, _, _: ParquetFileFormat, _),
-              _: Seq[AttributeReference],
+              _: Seq[_],
               requiredSchema,
               _,
               _,
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 75a2ff981..a412720e5 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -1472,14 +1472,14 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde {
       // With Spark 3.4, CharVarcharCodegenUtils.readSidePadding gets called to pad spaces for char
      // types. Use rpad to achieve the behavior. See https://github.com/apache/spark/pull/38151
       case StaticInvoke(
-          _: Class[CharVarcharCodegenUtils],
+          clz: Class[_],
           _: StringType,
           "readSidePadding",
           arguments,
           _,
           true,
           false,
-          true) if arguments.size == 2 =>
+          true) if clz == classOf[CharVarcharCodegenUtils] && arguments.size == 2 =>
         val argsExpr = Seq(
           exprToProtoInternal(Cast(arguments(0), StringType), inputs),
           exprToProtoInternal(arguments(1), inputs))
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometCollectLimitExec.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometCollectLimitExec.scala
index 83126a7ba..dd4855126 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometCollectLimitExec.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometCollectLimitExec.scala
@@ -19,8 +19,6 @@
 
 package org.apache.spark.sql.comet
 
-import java.util.Objects
-
 import org.apache.spark.rdd.RDD
 import org.apache.spark.serializer.Serializer
 import org.apache.spark.sql.catalyst.InternalRow
@@ -29,6 +27,8 @@ import org.apache.spark.sql.execution.{ColumnarToRowExec, SparkPlan, UnaryExecNo
 import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics, SQLShuffleReadMetricsReporter, SQLShuffleWriteMetricsReporter}
 import org.apache.spark.sql.vectorized.ColumnarBatch
 
+import com.google.common.base.Objects
+
 /**
  * Comet physical plan node for Spark `CollectLimitExec`.
  *
diff --git a/spark/src/test/scala/org/apache/comet/exec/CometExecSuite.scala b/spark/src/test/scala/org/apache/comet/exec/CometExecSuite.scala
index 0ed719fab..6a34d4fe4 100644
--- a/spark/src/test/scala/org/apache/comet/exec/CometExecSuite.scala
+++ b/spark/src/test/scala/org/apache/comet/exec/CometExecSuite.scala
@@ -291,7 +291,6 @@ class CometExecSuite extends CometTestBase {
       val exchanges = stripAQEPlan(df.queryExecution.executedPlan).collect {
         case s: CometShuffleExchangeExec if s.shuffleType == CometColumnarShuffle =>
           s
-          s
       }
       assert(exchanges.length == 4)
     }
diff --git a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
index 2264635d8..9ea0218c2 100644
--- a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
@@ -271,7 +271,7 @@ class CometTPCHQuerySuite extends QueryTest with CometTPCBase with SQLQueryTestH
 
   }
 
   // TODO: remove once Spark 3.2 & 3.3 is no longer supported
-  private val shouldRegenerateGoldenFiles: Boolean =
+  private def shouldRegenerateGoldenFiles: Boolean =
     System.getenv("SPARK_GENERATE_GOLDEN_FILES") == "1"
 }
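
----

Notes on the warning classes fixed above. The sketches that follow are
standalone illustrations, not Comet code; every class and object name in
them is a hypothetical stand-in.

The `_: Seq[AttributeReference]` pattern in CometSparkSessionExtensions.scala
warned because the JVM erases generic type arguments: at runtime a pattern
match can only verify that the value is a Seq, so scalac flags the element
type as unchecked. Matching `_: Seq[_]` states exactly what can be checked.
A minimal sketch:

    object ErasureDemo {
      def isSeq(xs: Any): Boolean = xs match {
        // `case _: Seq[String]` would warn: the type argument String is
        // unchecked since it is eliminated by erasure.
        case _: Seq[_] => true // checks all the JVM can check: it is a Seq
        case _ => false
      }

      def main(args: Array[String]): Unit = {
        println(isSeq(Seq(1, 2, 3))) // true; the element type is never inspected
        println(isSeq("not a seq"))  // false
      }
    }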
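
The StaticInvoke rewrite in QueryPlanSerde.scala is the same erasure problem
with a different cure: `_: Class[CharVarcharCodegenUtils]` cannot be checked
at runtime, but binding the erased `clz: Class[_]` and comparing it against
`classOf[...]` in the guard tests the same condition without the warning.
The idiom, with a hypothetical stand-in class:

    object ClassGuardDemo {
      final class PaddingUtils // hypothetical stand-in for CharVarcharCodegenUtils

      def describe(x: Any): String = x match {
        // `case _: Class[PaddingUtils]` would be an unchecked pattern; an
        // equality guard on the erased Class[_] value is warning-free.
        case clz: Class[_] if clz == classOf[PaddingUtils] => "the class we want"
        case _: Class[_] => "some other class"
        case _ => "not a class at all"
      }

      def main(args: Array[String]): Unit = {
        println(describe(classOf[PaddingUtils])) // the class we want
        println(describe(classOf[String]))       // some other class
      }
    }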
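
In CometCollectLimitExec.scala the import moves from `java.util.Objects` to
Guava's `com.google.common.base.Objects`, presumably because the node's
hashCode combines several fields: Guava's `Objects.hashCode(Object...)` is
varargs, whereas `java.util.Objects.hashCode(Object)` accepts exactly one
argument. A sketch of the varargs form, assuming Guava on the classpath and
a hypothetical node class:

    import com.google.common.base.Objects

    final class LimitNode(val limit: Int, val offset: Int) {
      // Guava's varargs Objects.hashCode combines any number of fields.
      override def hashCode(): Int = Objects.hashCode(Int.box(limit), Int.box(offset))

      override def equals(other: Any): Boolean = other match {
        case that: LimitNode => limit == that.limit && offset == that.offset
        case _ => false
      }
    }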
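
The CometExecSuite.scala change deletes a duplicated `s` inside the collect
block; the extra occurrence sat in statement position, where scalac warns
"a pure expression does nothing in statement position". A minimal
reproduction of the warning shape:

    object PureExpressionDemo {
      def main(args: Array[String]): Unit = {
        val doubled = Seq(1, 2, 3).map { n =>
          // An extra bare `n` on its own line here would draw the
          // "pure expression does nothing in statement position" warning.
          n * 2 // only the last expression is the block's result
        }
        println(doubled) // List(2, 4, 6)
      }
    }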
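
The patch does not say why CometTPCHQuerySuite.scala turns the golden-files
flag from a val into a def; the TODO hints that newer Spark test helpers
declare this member themselves, and a def also allocates no backing field.
The language-level difference, shown in isolation:

    object GoldenFilesFlagDemo {
      // A val is computed once, when the enclosing object is initialized.
      private val flagAsVal: Boolean =
        System.getenv("SPARK_GENERATE_GOLDEN_FILES") == "1"

      // A def is re-evaluated at each call and stores nothing.
      private def flagAsDef: Boolean =
        System.getenv("SPARK_GENERATE_GOLDEN_FILES") == "1"

      def main(args: Array[String]): Unit =
        println(s"val: $flagAsVal, def: $flagAsDef")
    }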