diff --git a/.github/workflows/spark_sql_test.yml b/.github/workflows/spark_sql_test.yml
index 477e3a1ab9..238fbb2715 100644
--- a/.github/workflows/spark_sql_test.yml
+++ b/.github/workflows/spark_sql_test.yml
@@ -71,7 +71,7 @@ jobs:
with:
spark-version: ${{ matrix.spark-version.full }}
spark-short-version: ${{ matrix.spark-version.short }}
- comet-version: '0.5.0-SNAPSHOT' # TODO: get this from pom.xml
+ comet-version: '0.6.0-SNAPSHOT' # TODO: get this from pom.xml
- name: Run Spark tests
run: |
cd apache-spark
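The `TODO: get this from pom.xml` comment above (and its twin in the ANSI workflow below) suggests resolving the version at workflow time instead of hard-coding it. A minimal sketch of one way to do that, assuming the standard Maven Help plugin and a GitHub Actions step context (untested here):

```shell
# Resolve the project version from pom.xml so version bumps like the one in
# this diff no longer need to touch the workflow files.
COMET_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
echo "comet-version=$COMET_VERSION" >> "$GITHUB_OUTPUT"
```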
diff --git a/.github/workflows/spark_sql_test_ansi.yml b/.github/workflows/spark_sql_test_ansi.yml
index e1d8388fb1..14ec6366f4 100644
--- a/.github/workflows/spark_sql_test_ansi.yml
+++ b/.github/workflows/spark_sql_test_ansi.yml
@@ -69,7 +69,7 @@ jobs:
with:
spark-version: ${{ matrix.spark-version.full }}
spark-short-version: ${{ matrix.spark-version.short }}
- comet-version: '0.5.0-SNAPSHOT' # TODO: get this from pom.xml
+ comet-version: '0.6.0-SNAPSHOT' # TODO: get this from pom.xml
- name: Run Spark tests
run: |
cd apache-spark
diff --git a/common/pom.xml b/common/pom.xml
index 91109edf5d..b6cd75a32d 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -26,7 +26,7 @@ under the License.
+ * For example, Iceberg's DeleteFile has a row id mapping to map row id to position. This
+ * interface is used to set and get the row id mapping. The row id mapping is an array of integers,
+ * where the index is the row id and the value is the position. Here is an example:
+ * [0,1,2,3,4,5,6,7] -- Original status of the row id mapping array
+ * Position delete 2, 6
+ * [0,1,3,4,5,7,-,-] -- After applying position deletes [Set Num records to 6]
+ */
+public interface HasRowIdMapping {
+ default void setRowIdMapping(int[] rowIdMapping) {
+ throw new UnsupportedOperationException("setRowIdMapping is not supported");
+ }
+
+ default int[] getRowIdMapping() {
+ throw new UnsupportedOperationException("getRowIdMapping is not supported");
+ }
+}
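To illustrate the contract described in the interface's comment, here is a minimal sketch of a vector class that carries a row id mapping. `ExampleVector` is hypothetical and not part of the Comet codebase; it only shows how the mapping from the example above would be consumed.

```java
// Hypothetical vector that stores the row id mapping produced by position deletes.
public class ExampleVector implements HasRowIdMapping {
  private int[] rowIdMapping;

  @Override
  public void setRowIdMapping(int[] rowIdMapping) {
    this.rowIdMapping = rowIdMapping;
  }

  @Override
  public int[] getRowIdMapping() {
    return rowIdMapping;
  }

  // Resolve a logical row id to a physical position. With the mapping
  // [0,1,3,4,5,7,-,-] from the example, row id 2 resolves to position 3.
  public int position(int rowId) {
    return rowIdMapping == null ? rowId : rowIdMapping[rowId];
  }
}
```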
diff --git a/docs/source/contributor-guide/benchmark-results/tpc-ds.md b/docs/source/contributor-guide/benchmark-results/tpc-ds.md
index a6650f7e74..012913189a 100644
--- a/docs/source/contributor-guide/benchmark-results/tpc-ds.md
+++ b/docs/source/contributor-guide/benchmark-results/tpc-ds.md
@@ -19,8 +19,8 @@ under the License.
# Apache DataFusion Comet: Benchmarks Derived From TPC-DS
-The following benchmarks were performed on a two node Kubernetes cluster with
-data stored locally in Parquet format on NVMe storage. Performance characteristics will vary in different environments
+The following benchmarks were performed on a Linux workstation with PCIe 5, an AMD 7950X CPU (16 cores), 128 GB RAM, and
+data stored locally in Parquet format on NVMe storage. Performance characteristics will vary in different environments
and we encourage you to run these benchmarks in your own environments.
The tracking issue for improving TPC-DS performance is [#858](https://github.com/apache/datafusion-comet/issues/858).
@@ -43,3 +43,64 @@ The raw results of these benchmarks in JSON format is available here:
- [Spark](0.5.0/spark-tpcds.json)
- [Comet](0.5.0/comet-tpcds.json)
+
+## Scripts
+
+Here are the scripts that were used to generate these results.
+
+### Apache Spark
+
+```shell
+#!/bin/bash
+$SPARK_HOME/bin/spark-submit \
+ --master $SPARK_MASTER \
+ --conf spark.driver.memory=8G \
+ --conf spark.executor.memory=32G \
+ --conf spark.executor.instances=2 \
+ --conf spark.executor.cores=8 \
+ --conf spark.cores.max=16 \
+ --conf spark.eventLog.enabled=true \
+ tpcbench.py \
+ --benchmark tpcds \
+ --name spark \
+ --data /mnt/bigdata/tpcds/sf100/ \
+ --queries ../../tpcds/ \
+ --output . \
+ --iterations 5
+```
+
+### Apache Spark + Comet
+
+```shell
+#!/bin/bash
+$SPARK_HOME/bin/spark-submit \
+ --master $SPARK_MASTER \
+ --conf spark.driver.memory=8G \
+ --conf spark.executor.instances=2 \
+ --conf spark.executor.memory=16G \
+ --conf spark.executor.cores=8 \
+ --total-executor-cores=16 \
+ --conf spark.eventLog.enabled=true \
+ --conf spark.driver.maxResultSize=2G \
+ --conf spark.memory.offHeap.enabled=true \
+ --conf spark.memory.offHeap.size=24g \
+ --jars $COMET_JAR \
+ --conf spark.driver.extraClassPath=$COMET_JAR \
+ --conf spark.executor.extraClassPath=$COMET_JAR \
+ --conf spark.plugins=org.apache.spark.CometPlugin \
+ --conf spark.comet.enabled=true \
+ --conf spark.comet.cast.allowIncompatible=true \
+ --conf spark.comet.exec.replaceSortMergeJoin=false \
+ --conf spark.comet.exec.shuffle.enabled=true \
+ --conf spark.comet.exec.shuffle.mode=auto \
+ --conf spark.comet.exec.shuffle.fallbackToColumnar=true \
+ --conf spark.comet.exec.shuffle.compression.codec=lz4 \
+ --conf spark.shuffle.manager=org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager \
+ tpcbench.py \
+ --name comet \
+ --benchmark tpcds \
+ --data /mnt/bigdata/tpcds/sf100/ \
+ --queries ../../tpcds/ \
+ --output . \
+ --iterations 5
+```
\ No newline at end of file
diff --git a/docs/source/contributor-guide/benchmark-results/tpc-h.md b/docs/source/contributor-guide/benchmark-results/tpc-h.md
index 336deb7a7c..d383cae852 100644
--- a/docs/source/contributor-guide/benchmark-results/tpc-h.md
+++ b/docs/source/contributor-guide/benchmark-results/tpc-h.md
@@ -25,21 +25,84 @@ and we encourage you to run these benchmarks in your own environments.
The tracking issue for improving TPC-H performance is [#391](https://github.com/apache/datafusion-comet/issues/391).
-
+
Here is a breakdown showing relative performance of Spark and Comet for each query.
-
+
The following chart shows how much Comet currently accelerates each query from the benchmark in relative terms.
-
+
The following chart shows how much Comet currently accelerates each query from the benchmark in absolute terms.
-
+
The raw results of these benchmarks in JSON format is available here:
- [Spark](0.5.0/spark-tpch.json)
- [Comet](0.5.0/comet-tpch.json)
+
+## Scripts
+
+Here are the scripts that were used to generate these results.
+
+### Apache Spark
+
+```shell
+#!/bin/bash
+$SPARK_HOME/bin/spark-submit \
+ --master $SPARK_MASTER \
+ --conf spark.driver.memory=8G \
+ --conf spark.executor.instances=1 \
+ --conf spark.executor.cores=8 \
+ --conf spark.cores.max=8 \
+ --conf spark.executor.memory=16g \
+ --conf spark.memory.offHeap.enabled=true \
+ --conf spark.memory.offHeap.size=16g \
+ --conf spark.eventLog.enabled=true \
+ tpcbench.py \
+ --name spark \
+ --benchmark tpch \
+ --data /mnt/bigdata/tpch/sf100/ \
+ --queries ../../tpch/queries \
+ --output . \
+ --iterations 5
+```
+
+### Apache Spark + Comet
+
+```shell
+#!/bin/bash
+$SPARK_HOME/bin/spark-submit \
+ --master $SPARK_MASTER \
+ --conf spark.driver.memory=8G \
+ --conf spark.executor.instances=1 \
+ --conf spark.executor.cores=8 \
+ --conf spark.cores.max=8 \
+ --conf spark.executor.memory=16g \
+ --conf spark.memory.offHeap.enabled=true \
+ --conf spark.memory.offHeap.size=16g \
+ --conf spark.comet.exec.replaceSortMergeJoin=true \
+ --conf spark.eventLog.enabled=true \
+ --jars $COMET_JAR \
+ --driver-class-path $COMET_JAR \
+ --conf spark.driver.extraClassPath=$COMET_JAR \
+ --conf spark.executor.extraClassPath=$COMET_JAR \
+ --conf spark.sql.extensions=org.apache.comet.CometSparkSessionExtensions \
+ --conf spark.comet.enabled=true \
+ --conf spark.comet.exec.shuffle.enabled=true \
+ --conf spark.comet.exec.shuffle.mode=auto \
+ --conf spark.comet.exec.shuffle.fallbackToColumnar=true \
+ --conf spark.comet.exec.shuffle.compression.codec=lz4 \
+ --conf spark.shuffle.manager=org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager \
+ tpcbench.py \
+ --name comet \
+ --benchmark tpch \
+ --data /mnt/bigdata/tpch/sf100/ \
+ --queries ../../tpch/queries \
+ --output . \
+ --iterations 5
+```
\ No newline at end of file
diff --git a/docs/source/contributor-guide/benchmarking.md b/docs/source/contributor-guide/benchmarking.md
index 173d598ac2..e2372b3d66 100644
--- a/docs/source/contributor-guide/benchmarking.md
+++ b/docs/source/contributor-guide/benchmarking.md
@@ -24,62 +24,6 @@ benchmarking documentation and scripts are available in the [DataFusion Benchmar
We also have many micro benchmarks that can be run from an IDE located [here](https://github.com/apache/datafusion-comet/tree/main/spark/src/test/scala/org/apache/spark/sql/benchmark).
-Here are example commands for running the benchmarks against a Spark cluster. This command will need to be
-adapted based on the Spark environment and location of data files.
-
-These commands are intended to be run from the `runners/datafusion-comet` directory in the `datafusion-benchmarks`
-repository.
-
-## Running Benchmarks Against Apache Spark
-
-```shell
-$SPARK_HOME/bin/spark-submit \
- --master $SPARK_MASTER \
- --conf spark.driver.memory=8G \
- --conf spark.executor.instances=1 \
- --conf spark.executor.memory=32G \
- --conf spark.executor.cores=8 \
- --conf spark.cores.max=8 \
- tpcbench.py \
- --benchmark tpch \
- --data /mnt/bigdata/tpch/sf100/ \
- --queries ../../tpch/queries \
- --iterations 3
-```
-
-## Running Benchmarks Against Apache Spark with Apache DataFusion Comet Enabled
-
-### TPC-H
-
-```shell
-$SPARK_HOME/bin/spark-submit \
- --master $SPARK_MASTER \
- --conf spark.driver.memory=8G \
- --conf spark.executor.instances=1 \
- --conf spark.executor.memory=16G \
- --conf spark.executor.cores=8 \
- --conf spark.cores.max=8 \
- --conf spark.memory.offHeap.enabled=true \
- --conf spark.memory.offHeap.size=16g \
- --jars $COMET_JAR \
- --conf spark.driver.extraClassPath=$COMET_JAR \
- --conf spark.executor.extraClassPath=$COMET_JAR \
- --conf spark.plugins=org.apache.spark.CometPlugin \
- --conf spark.comet.cast.allowIncompatible=true \
- --conf spark.comet.exec.replaceSortMergeJoin=true \
- --conf spark.shuffle.manager=org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager \
- --conf spark.comet.exec.shuffle.enabled=true \
- --conf spark.comet.exec.shuffle.mode=auto \
- --conf spark.comet.exec.shuffle.enableFastEncoding=true \
- --conf spark.comet.exec.shuffle.fallbackToColumnar=true \
- --conf spark.comet.exec.shuffle.compression.codec=lz4 \
- tpcbench.py \
- --benchmark tpch \
- --data /mnt/bigdata/tpch/sf100/ \
- --queries ../../tpch/queries \
- --iterations 3
-```
-
### TPC-DS
For TPC-DS, use `spark.comet.exec.replaceSortMergeJoin=false`.
diff --git a/docs/source/contributor-guide/debugging.md b/docs/source/contributor-guide/debugging.md
index 47d1f04c87..8a368cca26 100644
--- a/docs/source/contributor-guide/debugging.md
+++ b/docs/source/contributor-guide/debugging.md
@@ -130,7 +130,7 @@ Then build the Comet as [described](https://github.com/apache/arrow-datafusion-c
Start Comet with `RUST_BACKTRACE=1`
```console
-RUST_BACKTRACE=1 $SPARK_HOME/spark-shell --jars spark/target/comet-spark-spark3.4_2.12-0.5.0-SNAPSHOT.jar --conf spark.plugins=org.apache.spark.CometPlugin --conf spark.comet.enabled=true --conf spark.comet.exec.enabled=true
+RUST_BACKTRACE=1 $SPARK_HOME/spark-shell --jars spark/target/comet-spark-spark3.4_2.12-0.6.0-SNAPSHOT.jar --conf spark.plugins=org.apache.spark.CometPlugin --conf spark.comet.enabled=true --conf spark.comet.exec.enabled=true
```
Get the expanded exception details
diff --git a/docs/source/user-guide/installation.md b/docs/source/user-guide/installation.md
index 22d482e475..390c926387 100644
--- a/docs/source/user-guide/installation.md
+++ b/docs/source/user-guide/installation.md
@@ -74,7 +74,7 @@ See the [Comet Kubernetes Guide](kubernetes.md) guide.
Make sure `SPARK_HOME` points to the same Spark version as Comet was built for.
```console
-export COMET_JAR=spark/target/comet-spark-spark3.4_2.12-0.5.0-SNAPSHOT.jar
+export COMET_JAR=spark/target/comet-spark-spark3.4_2.12-0.6.0-SNAPSHOT.jar
$SPARK_HOME/bin/spark-shell \
--jars $COMET_JAR \
@@ -130,7 +130,7 @@ explicitly contain Comet otherwise Spark may use a different class-loader for th
components which will then fail at runtime. For example:
```
---driver-class-path spark/target/comet-spark-spark3.4_2.12-0.5.0-SNAPSHOT.jar
+--driver-class-path spark/target/comet-spark-spark3.4_2.12-0.6.0-SNAPSHOT.jar
```
Some cluster managers may require additional configuration, see