diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 9e5b0f424838..75189f49e861 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -31,9 +31,9 @@ on: required: true default: '8' scala: - description: 'Scala version: 2.12 or 2.13' + description: 'Scala version: 2.13' required: true - default: '2.12' + default: '2.13' failfast: description: 'Failfast: true or false' required: true @@ -170,7 +170,6 @@ jobs: key: tpcds-${{ hashFiles('.github/workflows/benchmark.yml', 'sql/core/src/test/scala/org/apache/spark/sql/TPCDSSchema.scala') }} - name: Run benchmarks run: | - dev/change-scala-version.sh ${{ github.event.inputs.scala }} ./build/sbt -Pscala-${{ github.event.inputs.scala }} -Pyarn -Pmesos -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Pkinesis-asl -Pspark-ganglia-lgpl Test/package # Make less noisy cp conf/log4j2.properties.template conf/log4j2.properties @@ -181,8 +180,6 @@ jobs: --jars "`find . -name '*-SNAPSHOT-tests.jar' -o -name '*avro*-SNAPSHOT.jar' | paste -sd ',' -`" \ "`find . -name 'spark-core*-SNAPSHOT-tests.jar'`" \ "${{ github.event.inputs.class }}" - # Revert to default Scala version to clean up unnecessary git diff - dev/change-scala-version.sh 2.12 # To keep the directory structure and file permissions, tar them # See also https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files echo "Preparing the benchmark results:" diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index f8e143833c5d..531d4f86ee79 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -86,7 +86,7 @@ jobs: sparkr=`./dev/is-changed.py -m sparkr` tpcds=`./dev/is-changed.py -m sql` docker=`./dev/is-changed.py -m docker-integration-tests` - # 'build', 'scala-213', and 'java-other-versions' are always true for now. + # 'build' and 'java-other-versions' are always true for now. # It does not save significant time and most of PRs trigger the build. 
precondition=" { @@ -95,7 +95,6 @@ jobs: \"sparkr\": \"$sparkr\", \"tpcds-1g\": \"$tpcds\", \"docker-integration-tests\": \"$docker\", - \"scala-213\": \"true\", \"java-other-versions\": \"true\", \"lint\" : \"true\", \"k8s-integration-tests\" : \"true\", @@ -828,53 +827,6 @@ jobs: ./build/mvn $MAVEN_CLI_OPTS -DskipTests -Pyarn -Pmesos -Pkubernetes -Pvolcano -Phive -Phive-thriftserver -Phadoop-cloud -Djava.version=${JAVA_VERSION/-ea} install rm -rf ~/.m2/repository/org/apache/spark - scala-213: - needs: precondition - if: fromJson(needs.precondition.outputs.required).scala-213 == 'true' - name: Scala 2.13 build with SBT - runs-on: ubuntu-22.04 - timeout-minutes: 300 - steps: - - name: Checkout Spark repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - repository: apache/spark - ref: ${{ inputs.branch }} - - name: Sync the current branch with the latest in Apache Spark - if: github.repository != 'apache/spark' - run: | - git fetch https://github.com/$GITHUB_REPOSITORY.git ${GITHUB_REF#refs/heads/} - git -c user.name='Apache Spark Test Account' -c user.email='sparktestacc@gmail.com' merge --no-commit --progress --squash FETCH_HEAD - git -c user.name='Apache Spark Test Account' -c user.email='sparktestacc@gmail.com' commit -m "Merged commit" --allow-empty - - name: Cache Scala, SBT and Maven - uses: actions/cache@v3 - with: - path: | - build/apache-maven-* - build/scala-* - build/*.jar - ~/.sbt - key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} - restore-keys: | - build- - - name: Cache Coursier local repository - uses: actions/cache@v3 - with: - path: ~/.cache/coursier - key: scala-213-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} - restore-keys: | - scala-213-coursier- - - name: Install Java 8 - uses: actions/setup-java@v3 - with: - distribution: zulu - java-version: 8 - - name: Build with SBT - run: | - ./dev/change-scala-version.sh 2.13 - ./build/sbt -Pyarn -Pmesos -Pkubernetes -Pvolcano -Phive -Phive-thriftserver -Phadoop-cloud -Pkinesis-asl -Pdocker-integration-tests -Pkubernetes-integration-tests -Pspark-ganglia-lgpl -Pscala-2.13 compile Test/compile - # Any TPC-DS related updates on this job need to be applied to tpcds-1g-gen job of benchmark.yml as well tpcds-1g: needs: precondition diff --git a/.github/workflows/build_ansi.yml b/.github/workflows/build_ansi.yml index e67a9262fcd7..be9143288455 100644 --- a/.github/workflows/build_ansi.yml +++ b/.github/workflows/build_ansi.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build / ANSI (master, Hadoop 3, JDK 8, Scala 2.12)" +name: "Build / ANSI (master, Hadoop 3, JDK 8, Scala 2.13)" on: schedule: diff --git a/.github/workflows/build_coverage.yml b/.github/workflows/build_coverage.yml index aa210f003186..9c448bb2b1f2 100644 --- a/.github/workflows/build_coverage.yml +++ b/.github/workflows/build_coverage.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build / Coverage (master, Scala 2.12, Hadoop 3, JDK 8)" +name: "Build / Coverage (master, Scala 2.13, Hadoop 3, JDK 8)" on: schedule: diff --git a/.github/workflows/build_java11.yml b/.github/workflows/build_java11.yml index bf7b2edb45ff..f601e8622902 100644 --- a/.github/workflows/build_java11.yml +++ b/.github/workflows/build_java11.yml @@ -17,7 +17,7 @@ # under the License. 
# -name: "Build (master, Scala 2.12, Hadoop 3, JDK 11)" +name: "Build (master, Scala 2.13, Hadoop 3, JDK 11)" on: schedule: diff --git a/.github/workflows/build_java17.yml b/.github/workflows/build_java17.yml index 9465e5ea0e31..1e5a49101de6 100644 --- a/.github/workflows/build_java17.yml +++ b/.github/workflows/build_java17.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build (master, Scala 2.12, Hadoop 3, JDK 17)" +name: "Build (master, Scala 2.13, Hadoop 3, JDK 17)" on: schedule: diff --git a/.github/workflows/build_java21.yml b/.github/workflows/build_java21.yml index f317bb8ff8a7..8d8fc1ef7f93 100644 --- a/.github/workflows/build_java21.yml +++ b/.github/workflows/build_java21.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build (master, Scala 2.12, Hadoop 3, JDK 21)" +name: "Build (master, Scala 2.13, Hadoop 3, JDK 21)" on: schedule: diff --git a/.github/workflows/build_maven.yml b/.github/workflows/build_maven.yml index 4b68224e9675..7f26324eb75a 100644 --- a/.github/workflows/build_maven.yml +++ b/.github/workflows/build_maven.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build using Maven (master, Scala 2.12, Hadoop 3, JDK 8)" +name: "Build using Maven (master, Scala 2.13, Hadoop 3, JDK 8)" on: schedule: diff --git a/.github/workflows/build_rockdb_as_ui_backend.yml b/.github/workflows/build_rockdb_as_ui_backend.yml index 04e0e7c2e107..56b664270f11 100644 --- a/.github/workflows/build_rockdb_as_ui_backend.yml +++ b/.github/workflows/build_rockdb_as_ui_backend.yml @@ -17,7 +17,7 @@ # under the License. # -name: "Build / RocksDB as UI Backend (master, Hadoop 3, JDK 8, Scala 2.12)" +name: "Build / RocksDB as UI Backend (master, Hadoop 3, JDK 8, Scala 2.13)" on: schedule: diff --git a/LICENSE-binary b/LICENSE-binary index 900b64611063..865a30f0f308 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -209,10 +209,10 @@ org.apache.zookeeper:zookeeper oro:oro commons-configuration:commons-configuration commons-digester:commons-digester -com.chuusai:shapeless_2.12 +com.chuusai:shapeless_2.13 com.googlecode.javaewah:JavaEWAH com.twitter:chill-java -com.twitter:chill_2.12 +com.twitter:chill_2.13 com.univocity:univocity-parsers javax.jdo:jdo-api joda-time:joda-time @@ -220,23 +220,23 @@ net.sf.opencsv:opencsv org.apache.derby:derby org.objenesis:objenesis org.roaringbitmap:RoaringBitmap -org.scalanlp:breeze-macros_2.12 -org.scalanlp:breeze_2.12 -org.typelevel:macro-compat_2.12 +org.scalanlp:breeze-macros_2.13 +org.scalanlp:breeze_2.13 +org.typelevel:macro-compat_2.13 org.yaml:snakeyaml org.apache.xbean:xbean-asm7-shaded com.squareup.okhttp3:logging-interceptor com.squareup.okhttp3:okhttp com.squareup.okio:okio -org.apache.spark:spark-catalyst_2.12 -org.apache.spark:spark-kvstore_2.12 -org.apache.spark:spark-launcher_2.12 -org.apache.spark:spark-mllib-local_2.12 -org.apache.spark:spark-network-common_2.12 -org.apache.spark:spark-network-shuffle_2.12 -org.apache.spark:spark-sketch_2.12 -org.apache.spark:spark-tags_2.12 -org.apache.spark:spark-unsafe_2.12 +org.apache.spark:spark-catalyst_2.13 +org.apache.spark:spark-kvstore_2.13 +org.apache.spark:spark-launcher_2.13 +org.apache.spark:spark-mllib-local_2.13 +org.apache.spark:spark-network-common_2.13 +org.apache.spark:spark-network-shuffle_2.13 +org.apache.spark:spark-sketch_2.13 +org.apache.spark:spark-tags_2.13 +org.apache.spark:spark-unsafe_2.13 commons-httpclient:commons-httpclient com.vlkan:flatbuffers com.ning:compress-lzf @@ -299,10 +299,10 @@ org.apache.orc:orc-mapreduce org.mortbay.jetty:jetty 
org.mortbay.jetty:jetty-util com.jolbox:bonecp -org.json4s:json4s-ast_2.12 -org.json4s:json4s-core_2.12 -org.json4s:json4s-jackson_2.12 -org.json4s:json4s-scalap_2.12 +org.json4s:json4s-ast_2.13 +org.json4s:json4s-core_2.13 +org.json4s:json4s-jackson_2.13 +org.json4s:json4s-scalap_2.13 com.carrotsearch:hppc com.fasterxml.jackson.core:jackson-annotations com.fasterxml.jackson.core:jackson-core @@ -312,7 +312,7 @@ com.fasterxml.jackson.jaxrs:jackson-jaxrs-base com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider com.fasterxml.jackson.module:jackson-module-jaxb-annotations com.fasterxml.jackson.module:jackson-module-paranamer -com.fasterxml.jackson.module:jackson-module-scala_2.12 +com.fasterxml.jackson.module:jackson-module-scala_2.13 com.github.mifmif:generex com.google.code.findbugs:jsr305 com.google.code.gson:gson @@ -385,8 +385,8 @@ org.eclipse.jetty:jetty-xml org.scala-lang:scala-compiler org.scala-lang:scala-library org.scala-lang:scala-reflect -org.scala-lang.modules:scala-parser-combinators_2.12 -org.scala-lang.modules:scala-xml_2.12 +org.scala-lang.modules:scala-parser-combinators_2.13 +org.scala-lang.modules:scala-xml_2.13 com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter com.zaxxer.HikariCP org.apache.hive:hive-beeline @@ -471,19 +471,19 @@ MIT License ----------- com.microsoft.sqlserver:mssql-jdbc -org.typelevel:spire_2.12 -org.typelevel:spire-macros_2.12 -org.typelevel:spire-platform_2.12 -org.typelevel:spire-util_2.12 -org.typelevel:algebra_2.12:jar -org.typelevel:cats-kernel_2.12 -org.typelevel:machinist_2.12 +org.typelevel:spire_2.13 +org.typelevel:spire-macros_2.13 +org.typelevel:spire-platform_2.13 +org.typelevel:spire-util_2.13 +org.typelevel:algebra_2.13:jar +org.typelevel:cats-kernel_2.13 +org.typelevel:machinist_2.13 net.razorvine:pickle org.slf4j:jcl-over-slf4j org.slf4j:jul-to-slf4j org.slf4j:slf4j-api org.slf4j:slf4j-log4j12 -com.github.scopt:scopt_2.12 +com.github.scopt:scopt_2.13 dev.ludovic.netlib:blas dev.ludovic.netlib:arpack dev.ludovic.netlib:lapack diff --git a/assembly/pom.xml b/assembly/pom.xml index 69952a7ccfa0..020ef80c4ad3 100644 --- a/assembly/pom.xml +++ b/assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-assembly_2.12 + spark-assembly_2.13 Spark Project Assembly https://spark.apache.org/ pom diff --git a/bin/load-spark-env.cmd b/bin/load-spark-env.cmd index 5692af529fb6..2d3906d9e0b5 100644 --- a/bin/load-spark-env.cmd +++ b/bin/load-spark-env.cmd @@ -39,21 +39,21 @@ set SCALA_VERSION_2=2.12 set ASSEMBLY_DIR1="%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_1%" set ASSEMBLY_DIR2="%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_2%" set ENV_VARIABLE_DOC=https://spark.apache.org/docs/latest/configuration.html#environment-variables - -if not defined SPARK_SCALA_VERSION ( - if exist %ASSEMBLY_DIR2% if exist %ASSEMBLY_DIR1% ( - echo Presence of build for multiple Scala versions detected ^(%ASSEMBLY_DIR1% and %ASSEMBLY_DIR2%^). - echo Remove one of them or, set SPARK_SCALA_VERSION=%SCALA_VERSION_1% in spark-env.cmd. - echo Visit %ENV_VARIABLE_DOC% for more details about setting environment variables in spark-env.cmd. - echo Either clean one of them or, set SPARK_SCALA_VERSION in spark-env.cmd. 
- exit 1 - ) - if exist %ASSEMBLY_DIR1% ( - set SPARK_SCALA_VERSION=%SCALA_VERSION_1% - ) else ( - set SPARK_SCALA_VERSION=%SCALA_VERSION_2% - ) -) +set SPARK_SCALA_VERSION=2.13 +rem if not defined SPARK_SCALA_VERSION ( +rem if exist %ASSEMBLY_DIR2% if exist %ASSEMBLY_DIR1% ( +rem echo Presence of build for multiple Scala versions detected ^(%ASSEMBLY_DIR1% and %ASSEMBLY_DIR2%^). +rem echo Remove one of them or, set SPARK_SCALA_VERSION=%SCALA_VERSION_1% in spark-env.cmd. +rem echo Visit %ENV_VARIABLE_DOC% for more details about setting environment variables in spark-env.cmd. +rem echo Either clean one of them or, set SPARK_SCALA_VERSION in spark-env.cmd. +rem exit 1 +rem ) +rem if exist %ASSEMBLY_DIR1% ( +rem set SPARK_SCALA_VERSION=%SCALA_VERSION_1% +rem ) else ( +rem set SPARK_SCALA_VERSION=%SCALA_VERSION_2% +rem ) +rem ) exit /b 0 :LoadSparkEnv diff --git a/bin/load-spark-env.sh b/bin/load-spark-env.sh index fc5e881dd0df..861ebbbcb7a3 100644 --- a/bin/load-spark-env.sh +++ b/bin/load-spark-env.sh @@ -42,27 +42,27 @@ if [ -z "$SPARK_ENV_LOADED" ]; then fi # Setting SPARK_SCALA_VERSION if not already set. - -if [ -z "$SPARK_SCALA_VERSION" ]; then - SCALA_VERSION_1=2.13 - SCALA_VERSION_2=2.12 - - ASSEMBLY_DIR_1="${SPARK_HOME}/assembly/target/scala-${SCALA_VERSION_1}" - ASSEMBLY_DIR_2="${SPARK_HOME}/assembly/target/scala-${SCALA_VERSION_2}" - ENV_VARIABLE_DOC="https://spark.apache.org/docs/latest/configuration.html#environment-variables" - if [[ -d "$ASSEMBLY_DIR_1" && -d "$ASSEMBLY_DIR_2" ]]; then - echo "Presence of build for multiple Scala versions detected ($ASSEMBLY_DIR_1 and $ASSEMBLY_DIR_2)." 1>&2 - echo "Remove one of them or, export SPARK_SCALA_VERSION=$SCALA_VERSION_1 in ${SPARK_ENV_SH}." 1>&2 - echo "Visit ${ENV_VARIABLE_DOC} for more details about setting environment variables in spark-env.sh." 1>&2 - exit 1 - fi - - if [[ -d "$ASSEMBLY_DIR_1" ]]; then - export SPARK_SCALA_VERSION=${SCALA_VERSION_1} - else - export SPARK_SCALA_VERSION=${SCALA_VERSION_2} - fi -fi +export SPARK_SCALA_VERSION=2.13 +#if [ -z "$SPARK_SCALA_VERSION" ]; then +# SCALA_VERSION_1=2.13 +# SCALA_VERSION_2=2.12 +# +# ASSEMBLY_DIR_1="${SPARK_HOME}/assembly/target/scala-${SCALA_VERSION_1}" +# ASSEMBLY_DIR_2="${SPARK_HOME}/assembly/target/scala-${SCALA_VERSION_2}" +# ENV_VARIABLE_DOC="https://spark.apache.org/docs/latest/configuration.html#environment-variables" +# if [[ -d "$ASSEMBLY_DIR_1" && -d "$ASSEMBLY_DIR_2" ]]; then +# echo "Presence of build for multiple Scala versions detected ($ASSEMBLY_DIR_1 and $ASSEMBLY_DIR_2)." 1>&2 +# echo "Remove one of them or, export SPARK_SCALA_VERSION=$SCALA_VERSION_1 in ${SPARK_ENV_SH}." 1>&2 +# echo "Visit ${ENV_VARIABLE_DOC} for more details about setting environment variables in spark-env.sh." 1>&2 +# exit 1 +# fi +# +# if [[ -d "$ASSEMBLY_DIR_1" ]]; then +# export SPARK_SCALA_VERSION=${SCALA_VERSION_1} +# else +# export SPARK_SCALA_VERSION=${SCALA_VERSION_2} +# fi +#fi # Append jline option to enable the Beeline process to run in background. if [[ ( ! $(ps -o stat= -p $$) =~ "+" ) && ! 
( -p /dev/stdin ) ]]; then diff --git a/common/kvstore/pom.xml b/common/kvstore/pom.xml index 6a4d029562af..54b7f401cc4f 100644 --- a/common/kvstore/pom.xml +++ b/common/kvstore/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-kvstore_2.12 + spark-kvstore_2.13 jar Spark Project Local DB https://spark.apache.org/ diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml index 1823edbe0f53..a37f64a962fb 100644 --- a/common/network-common/pom.xml +++ b/common/network-common/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-network-common_2.12 + spark-network-common_2.13 jar Spark Project Networking https://spark.apache.org/ diff --git a/common/network-shuffle/pom.xml b/common/network-shuffle/pom.xml index 8fccc855a53f..0f7036ef746c 100644 --- a/common/network-shuffle/pom.xml +++ b/common/network-shuffle/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-network-shuffle_2.12 + spark-network-shuffle_2.13 jar Spark Project Shuffle Streaming Service https://spark.apache.org/ diff --git a/common/network-yarn/pom.xml b/common/network-yarn/pom.xml index 57e18f7a43dc..5661a1d59a6b 100644 --- a/common/network-yarn/pom.xml +++ b/common/network-yarn/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-network-yarn_2.12 + spark-network-yarn_2.13 jar Spark Project YARN Shuffle Service https://spark.apache.org/ diff --git a/common/sketch/pom.xml b/common/sketch/pom.xml index 3d24494e543f..e3f3d4127b71 100644 --- a/common/sketch/pom.xml +++ b/common/sketch/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-sketch_2.12 + spark-sketch_2.13 jar Spark Project Sketch https://spark.apache.org/ diff --git a/common/tags/pom.xml b/common/tags/pom.xml index cdeb862abd5f..1894b6f4acb7 100644 --- a/common/tags/pom.xml +++ b/common/tags/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-tags_2.12 + spark-tags_2.13 jar Spark Project Tags https://spark.apache.org/ diff --git a/common/unsafe/pom.xml b/common/unsafe/pom.xml index 284d0176afdd..4d23c9149bb7 100644 --- a/common/unsafe/pom.xml +++ b/common/unsafe/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-unsafe_2.12 + spark-unsafe_2.13 jar Spark Project Unsafe https://spark.apache.org/ diff --git a/common/utils/pom.xml b/common/utils/pom.xml index 2f2fee0cf41e..37d1ea48d972 100644 --- a/common/utils/pom.xml +++ b/common/utils/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-common-utils_2.12 + spark-common-utils_2.13 jar Spark Project Common Utils https://spark.apache.org/ diff --git a/connector/avro/pom.xml b/connector/avro/pom.xml index d614ccf03846..bde5d5de6eae 100644 --- a/connector/avro/pom.xml +++ b/connector/avro/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-avro_2.12 + spark-avro_2.13 avro @@ -70,12 +70,12 @@ org.apache.spark spark-tags_${scala.binary.version} - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.tukaani 
xz diff --git a/connector/connect/client/jvm/pom.xml b/connector/connect/client/jvm/pom.xml index 8cb6758ec9f1..9ca66b5c29ca 100644 --- a/connector/connect/client/jvm/pom.xml +++ b/connector/connect/client/jvm/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../../../pom.xml - spark-connect-client-jvm_2.12 + spark-connect-client-jvm_2.13 jar Spark Project Connect Client https://spark.apache.org/ diff --git a/connector/connect/common/pom.xml b/connector/connect/common/pom.xml index 5fd6c8850255..bee76dc2644e 100644 --- a/connector/connect/common/pom.xml +++ b/connector/connect/common/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../../pom.xml - spark-connect-common_2.12 + spark-connect-common_2.13 jar Spark Project Connect Common https://spark.apache.org/ @@ -126,18 +126,6 @@ org.codehaus.mojo build-helper-maven-plugin - - add-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - add-scala-test-sources generate-test-sources diff --git a/connector/connect/common/src/main/scala-2.12/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala b/connector/connect/common/src/main/scala-2.12/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala deleted file mode 100644 index c2e01d974e0e..000000000000 --- a/connector/connect/common/src/main/scala-2.12/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.sql.connect.client.arrow - -import scala.collection.generic.{GenericCompanion, GenMapFactory} -import scala.collection.mutable -import scala.reflect.ClassTag - -import org.apache.spark.sql.connect.client.arrow.ArrowDeserializers.resolveCompanion - -/** - * A couple of scala version specific collection utility functions. 
- */ -private[arrow] object ScalaCollectionUtils { - def getIterableCompanion(tag: ClassTag[_]): GenericCompanion[Iterable] = { - ArrowDeserializers.resolveCompanion[GenericCompanion[Iterable]](tag) - } - def getMapCompanion(tag: ClassTag[_]): GenMapFactory[Map] = { - resolveCompanion[GenMapFactory[Map]](tag) - } - def wrap[T](array: AnyRef): mutable.WrappedArray[T] = { - mutable.WrappedArray.make(array) - } -} diff --git a/connector/connect/common/src/main/scala-2.13/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala b/connector/connect/common/src/main/scala/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala similarity index 100% rename from connector/connect/common/src/main/scala-2.13/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala rename to connector/connect/common/src/main/scala/org/apache/spark/sql/connect/client/arrow/ScalaCollectionUtils.scala diff --git a/connector/connect/server/pom.xml b/connector/connect/server/pom.xml index e98b8da8e5c0..291da8d87061 100644 --- a/connector/connect/server/pom.xml +++ b/connector/connect/server/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../../pom.xml - spark-connect_2.12 + spark-connect_2.13 jar Spark Project Connect Server https://spark.apache.org/ @@ -152,12 +152,12 @@ - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + com.google.guava guava diff --git a/connector/docker-integration-tests/pom.xml b/connector/docker-integration-tests/pom.xml index a9c066ede2d8..4308abbbe201 100644 --- a/connector/docker-integration-tests/pom.xml +++ b/connector/docker-integration-tests/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-docker-integration-tests_2.12 + spark-docker-integration-tests_2.13 jar Spark Project Docker Integration Tests https://spark.apache.org/ diff --git a/connector/kafka-0-10-assembly/pom.xml b/connector/kafka-0-10-assembly/pom.xml index bcc3503672e6..b2fcbdf8eca7 100644 --- a/connector/kafka-0-10-assembly/pom.xml +++ b/connector/kafka-0-10-assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-streaming-kafka-0-10-assembly_2.12 + spark-streaming-kafka-0-10-assembly_2.13 jar Spark Integration for Kafka 0.10 Assembly https://spark.apache.org/ diff --git a/connector/kafka-0-10-sql/pom.xml b/connector/kafka-0-10-sql/pom.xml index 9d7ec2c5491b..56f0b874b2b9 100644 --- a/connector/kafka-0-10-sql/pom.xml +++ b/connector/kafka-0-10-sql/pom.xml @@ -20,13 +20,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml org.apache.spark - spark-sql-kafka-0-10_2.12 + spark-sql-kafka-0-10_2.13 sql-kafka-0-10 @@ -74,12 +74,12 @@ test-jar test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.apache.kafka kafka-clients diff --git a/connector/kafka-0-10-token-provider/pom.xml b/connector/kafka-0-10-token-provider/pom.xml index c53091dba5d0..2b2707b9da32 100644 --- a/connector/kafka-0-10-token-provider/pom.xml +++ b/connector/kafka-0-10-token-provider/pom.xml @@ -20,13 +20,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml org.apache.spark - spark-token-provider-kafka-0-10_2.12 + spark-token-provider-kafka-0-10_2.13 token-provider-kafka-0-10 diff --git a/connector/kafka-0-10/pom.xml b/connector/kafka-0-10/pom.xml index 
8f7ef1541765..4c76c8a9fe46 100644 --- a/connector/kafka-0-10/pom.xml +++ b/connector/kafka-0-10/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-streaming-kafka-0-10_2.12 + spark-streaming-kafka-0-10_2.13 streaming-kafka-0-10 @@ -59,12 +59,12 @@ test-jar test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.apache.kafka kafka-clients diff --git a/connector/kinesis-asl-assembly/pom.xml b/connector/kinesis-asl-assembly/pom.xml index 2232f581ceab..ecd7d13dcba9 100644 --- a/connector/kinesis-asl-assembly/pom.xml +++ b/connector/kinesis-asl-assembly/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-streaming-kinesis-asl-assembly_2.12 + spark-streaming-kinesis-asl-assembly_2.13 jar Spark Project Kinesis Assembly https://spark.apache.org/ diff --git a/connector/kinesis-asl/pom.xml b/connector/kinesis-asl/pom.xml index 881098f330fd..9a7f40443bbc 100644 --- a/connector/kinesis-asl/pom.xml +++ b/connector/kinesis-asl/pom.xml @@ -19,13 +19,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-streaming-kinesis-asl_2.12 + spark-streaming-kinesis-asl_2.13 jar Spark Kinesis Integration diff --git a/connector/protobuf/pom.xml b/connector/protobuf/pom.xml index 26b5b601bccd..7854a9c59dcc 100644 --- a/connector/protobuf/pom.xml +++ b/connector/protobuf/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-protobuf_2.12 + spark-protobuf_2.13 protobuf @@ -70,12 +70,12 @@ org.apache.spark spark-tags_${scala.binary.version} - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + com.google.protobuf protobuf-java diff --git a/connector/spark-ganglia-lgpl/pom.xml b/connector/spark-ganglia-lgpl/pom.xml index 2f8e3ba015e9..4c9a0c7347e6 100644 --- a/connector/spark-ganglia-lgpl/pom.xml +++ b/connector/spark-ganglia-lgpl/pom.xml @@ -19,13 +19,13 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-ganglia-lgpl_2.12 + spark-ganglia-lgpl_2.13 jar Spark Ganglia Integration diff --git a/core/pom.xml b/core/pom.xml index 24f60c6c8381..6e5e31320421 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-core_2.12 + spark-core_2.13 jar Spark Project Core https://spark.apache.org/ @@ -35,12 +35,12 @@ - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.apache.avro avro @@ -542,24 +542,6 @@ - - org.codehaus.mojo - build-helper-maven-plugin - - - add-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - - - org.apache.maven.plugins maven-shade-plugin diff --git a/core/src/main/scala-2.12/org/apache/spark/util/BoundedPriorityQueue.scala b/core/src/main/scala-2.12/org/apache/spark/util/BoundedPriorityQueue.scala deleted file mode 100644 index a24102372344..000000000000 --- a/core/src/main/scala-2.12/org/apache/spark/util/BoundedPriorityQueue.scala +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.util - -import java.io.Serializable -import java.util.{PriorityQueue => JPriorityQueue} - -import scala.collection.JavaConverters._ -import scala.collection.generic.Growable - -/** - * Bounded priority queue. This class wraps the original PriorityQueue - * class and modifies it such that only the top K elements are retained. - * The top K elements are defined by an implicit Ordering[A]. - */ -private[spark] class BoundedPriorityQueue[A](maxSize: Int)(implicit ord: Ordering[A]) - extends Iterable[A] with Growable[A] with Serializable { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. - - private val underlying = new JPriorityQueue[A](maxSize, ord) - - override def iterator: Iterator[A] = underlying.iterator.asScala - - override def size: Int = underlying.size - - override def ++=(xs: TraversableOnce[A]): this.type = { - xs.foreach { this += _ } - this - } - - override def +=(elem: A): this.type = { - if (size < maxSize) { - underlying.offer(elem) - } else { - maybeReplaceLowest(elem) - } - this - } - - def poll(): A = { - underlying.poll() - } - - override def +=(elem1: A, elem2: A, elems: A*): this.type = { - this += elem1 += elem2 ++= elems - } - - override def clear(): Unit = { underlying.clear() } - - private def maybeReplaceLowest(a: A): Boolean = { - val head = underlying.peek() - if (head != null && ord.gt(a, head)) { - underlying.poll() - underlying.offer(a) - } else { - false - } - } -} diff --git a/core/src/main/scala-2.12/org/apache/spark/util/Iterators.scala b/core/src/main/scala-2.12/org/apache/spark/util/Iterators.scala deleted file mode 100644 index af5f369de53e..000000000000 --- a/core/src/main/scala-2.12/org/apache/spark/util/Iterators.scala +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.util - -private[util] object Iterators { - - /** - * Counts the number of elements of an iterator using a while loop rather than calling - * [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower - * in the current version of Scala. 
- */ - def size(iterator: Iterator[_]): Long = { - var count = 0L - while (iterator.hasNext) { - count += 1L - iterator.next() - } - count - } -} diff --git a/core/src/main/scala-2.12/org/apache/spark/util/TimeStampedHashMap.scala b/core/src/main/scala-2.12/org/apache/spark/util/TimeStampedHashMap.scala deleted file mode 100644 index da12582a5083..000000000000 --- a/core/src/main/scala-2.12/org/apache/spark/util/TimeStampedHashMap.scala +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.util - -import java.util.Map.Entry -import java.util.Set -import java.util.concurrent.ConcurrentHashMap - -import scala.collection.JavaConverters._ -import scala.collection.mutable - -import org.apache.spark.internal.Logging - -private[spark] case class TimeStampedValue[V](value: V, timestamp: Long) - -/** - * This is a custom implementation of scala.collection.mutable.Map which stores the insertion - * timestamp along with each key-value pair. If specified, the timestamp of each pair can be - * updated every time it is accessed. Key-value pairs whose timestamp are older than a particular - * threshold time can then be removed using the clearOldValues method. This is intended to - * be a drop-in replacement of scala.collection.mutable.HashMap. - * - * @param updateTimeStampOnGet Whether timestamp of a pair will be updated when it is accessed - */ -private[spark] class TimeStampedHashMap[A, B](updateTimeStampOnGet: Boolean = false) - extends mutable.Map[A, B]() with Logging { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. 
- - private val internalMap = new ConcurrentHashMap[A, TimeStampedValue[B]]() - - def get(key: A): Option[B] = { - val value = internalMap.get(key) - if (value != null && updateTimeStampOnGet) { - internalMap.replace(key, value, TimeStampedValue(value.value, currentTime)) - } - Option(value).map(_.value) - } - - def iterator: Iterator[(A, B)] = { - getEntrySet.iterator.asScala.map(kv => (kv.getKey, kv.getValue.value)) - } - - def getEntrySet: Set[Entry[A, TimeStampedValue[B]]] = internalMap.entrySet - - override def + [B1 >: B](kv: (A, B1)): mutable.Map[A, B1] = { - val newMap = new TimeStampedHashMap[A, B1] - val oldInternalMap = this.internalMap.asInstanceOf[ConcurrentHashMap[A, TimeStampedValue[B1]]] - newMap.internalMap.putAll(oldInternalMap) - kv match { case (a, b) => newMap.internalMap.put(a, TimeStampedValue(b, currentTime)) } - newMap - } - - override def - (key: A): mutable.Map[A, B] = { - val newMap = new TimeStampedHashMap[A, B] - newMap.internalMap.putAll(this.internalMap) - newMap.internalMap.remove(key) - newMap - } - - override def += (kv: (A, B)): this.type = { - kv match { case (a, b) => internalMap.put(a, TimeStampedValue(b, currentTime)) } - this - } - - override def -= (key: A): this.type = { - internalMap.remove(key) - this - } - - override def update(key: A, value: B): Unit = { - this += ((key, value)) - } - - override def apply(key: A): B = { - get(key).getOrElse { throw new NoSuchElementException() } - } - - override def filter(p: ((A, B)) => Boolean): mutable.Map[A, B] = { - internalMap.asScala.map { case (k, TimeStampedValue(v, t)) => (k, v) }.filter(p) - } - - override def empty: mutable.Map[A, B] = new TimeStampedHashMap[A, B]() - - override def size: Int = internalMap.size - - override def foreach[U](f: ((A, B)) => U): Unit = { - val it = getEntrySet.iterator - while(it.hasNext) { - val entry = it.next() - val kv = (entry.getKey, entry.getValue.value) - f(kv) - } - } - - def putIfAbsent(key: A, value: B): Option[B] = { - val prev = internalMap.putIfAbsent(key, TimeStampedValue(value, currentTime)) - Option(prev).map(_.value) - } - - def putAll(map: Map[A, B]): Unit = { - map.foreach { case (k, v) => update(k, v) } - } - - def toMap: Map[A, B] = iterator.toMap - - def clearOldValues(threshTime: Long, f: (A, B) => Unit): Unit = { - val it = getEntrySet.iterator - while (it.hasNext) { - val entry = it.next() - if (entry.getValue.timestamp < threshTime) { - f(entry.getKey, entry.getValue.value) - logDebug("Removing key " + entry.getKey) - it.remove() - } - } - } - - /** Removes old key-value pairs that have timestamp earlier than `threshTime`. 
*/ - def clearOldValues(threshTime: Long): Unit = { - clearOldValues(threshTime, (_, _) => ()) - } - - private def currentTime: Long = System.currentTimeMillis - - // For testing - - def getTimeStampedValue(key: A): Option[TimeStampedValue[B]] = { - Option(internalMap.get(key)) - } - - def getTimestamp(key: A): Option[Long] = { - getTimeStampedValue(key).map(_.timestamp) - } -} diff --git a/core/src/main/scala-2.13/org/apache/spark/util/BoundedPriorityQueue.scala b/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala similarity index 100% rename from core/src/main/scala-2.13/org/apache/spark/util/BoundedPriorityQueue.scala rename to core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala diff --git a/core/src/main/scala-2.13/org/apache/spark/util/Iterators.scala b/core/src/main/scala/org/apache/spark/util/Iterators.scala similarity index 100% rename from core/src/main/scala-2.13/org/apache/spark/util/Iterators.scala rename to core/src/main/scala/org/apache/spark/util/Iterators.scala diff --git a/core/src/main/scala-2.13/org/apache/spark/util/TimeStampedHashMap.scala b/core/src/main/scala/org/apache/spark/util/TimeStampedHashMap.scala similarity index 100% rename from core/src/main/scala-2.13/org/apache/spark/util/TimeStampedHashMap.scala rename to core/src/main/scala/org/apache/spark/util/TimeStampedHashMap.scala diff --git a/dev/change-scala-version.sh b/dev/change-scala-version.sh index c301f7273ede..af862312d821 100755 --- a/dev/change-scala-version.sh +++ b/dev/change-scala-version.sh @@ -19,7 +19,7 @@ set -e -VALID_VERSIONS=( 2.12 2.13 ) +VALID_VERSIONS=( 2.13 ) usage() { echo "Usage: $(basename $0) [-h|--help] diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 b/dev/deps/spark-deps-hadoop-3-hive-2.3 index 0ea9f076b3b7..150f1540aae0 100644 --- a/dev/deps/spark-deps-hadoop-3-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-3-hive-2.3 @@ -5,7 +5,7 @@ RoaringBitmap/0.9.45//RoaringBitmap-0.9.45.jar ST4/4.0.4//ST4-4.0.4.jar activation/1.1.1//activation-1.1.1.jar aircompressor/0.25//aircompressor-0.25.jar -algebra_2.12/2.0.1//algebra_2.12-2.0.1.jar +algebra_2.13/2.8.0//algebra_2.13-2.8.0.jar aliyun-java-sdk-core/4.5.10//aliyun-java-sdk-core-4.5.10.jar aliyun-java-sdk-kms/2.11.0//aliyun-java-sdk-kms-2.11.0.jar aliyun-java-sdk-ram/3.1.0//aliyun-java-sdk-ram-3.1.0.jar @@ -30,11 +30,11 @@ azure-keyvault-core/1.0.0//azure-keyvault-core-1.0.0.jar azure-storage/7.0.1//azure-storage-7.0.1.jar blas/3.0.3//blas-3.0.3.jar bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar -breeze-macros_2.12/2.1.0//breeze-macros_2.12-2.1.0.jar -breeze_2.12/2.1.0//breeze_2.12-2.1.0.jar -cats-kernel_2.12/2.1.1//cats-kernel_2.12-2.1.1.jar +breeze-macros_2.13/2.1.0//breeze-macros_2.13-2.1.0.jar +breeze_2.13/2.1.0//breeze_2.13-2.1.0.jar +cats-kernel_2.13/2.8.0//cats-kernel_2.13-2.8.0.jar chill-java/0.10.0//chill-java-0.10.0.jar -chill_2.12/0.10.0//chill_2.12-0.10.0.jar +chill_2.13/0.10.0//chill_2.13-0.10.0.jar commons-cli/1.5.0//commons-cli-1.5.0.jar commons-codec/1.16.0//commons-codec-1.16.0.jar commons-collections/3.2.2//commons-collections-3.2.2.jar @@ -106,7 +106,7 @@ jackson-dataformat-cbor/2.15.2//jackson-dataformat-cbor-2.15.2.jar jackson-dataformat-yaml/2.15.2//jackson-dataformat-yaml-2.15.2.jar jackson-datatype-jsr310/2.15.2//jackson-datatype-jsr310-2.15.2.jar jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar -jackson-module-scala_2.12/2.15.2//jackson-module-scala_2.12-2.15.2.jar +jackson-module-scala_2.13/2.15.2//jackson-module-scala_2.13-2.15.2.jar 
jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar jakarta.servlet-api/4.0.3//jakarta.servlet-api-4.0.3.jar @@ -114,6 +114,7 @@ jakarta.validation-api/2.0.2//jakarta.validation-api-2.0.2.jar jakarta.ws.rs-api/2.1.6//jakarta.ws.rs-api-2.1.6.jar jakarta.xml.bind-api/2.3.2//jakarta.xml.bind-api-2.3.2.jar janino/3.1.9//janino-3.1.9.jar +java-diff-utils/4.12//java-diff-utils-4.12.jar javassist/3.29.2-GA//javassist-3.29.2-GA.jar javax.jdo/3.2.0-m3//javax.jdo-3.2.0-m3.jar javolution/5.5.1//javolution-5.5.1.jar @@ -131,14 +132,16 @@ jettison/1.5.4//jettison-1.5.4.jar jetty-util-ajax/9.4.52.v20230823//jetty-util-ajax-9.4.52.v20230823.jar jetty-util/9.4.52.v20230823//jetty-util-9.4.52.v20230823.jar jline/2.14.6//jline-2.14.6.jar +jline/3.22.0//jline-3.22.0.jar +jna/5.13.0//jna-5.13.0.jar joda-time/2.12.5//joda-time-2.12.5.jar jodd-core/3.5.2//jodd-core-3.5.2.jar jpam/1.1//jpam-1.1.jar json/1.8//json-1.8.jar -json4s-ast_2.12/3.7.0-M11//json4s-ast_2.12-3.7.0-M11.jar -json4s-core_2.12/3.7.0-M11//json4s-core_2.12-3.7.0-M11.jar -json4s-jackson_2.12/3.7.0-M11//json4s-jackson_2.12-3.7.0-M11.jar -json4s-scalap_2.12/3.7.0-M11//json4s-scalap_2.12-3.7.0-M11.jar +json4s-ast_2.13/3.7.0-M11//json4s-ast_2.13-3.7.0-M11.jar +json4s-core_2.13/3.7.0-M11//json4s-core_2.13-3.7.0-M11.jar +json4s-jackson_2.13/3.7.0-M11//json4s-jackson_2.13-3.7.0-M11.jar +json4s-scalap_2.13/3.7.0-M11//json4s-scalap_2.13-3.7.0-M11.jar jsr305/3.0.0//jsr305-3.0.0.jar jta/1.1//jta-1.1.jar jul-to-slf4j/2.0.9//jul-to-slf4j-2.0.9.jar @@ -226,21 +229,22 @@ pickle/1.3//pickle-1.3.jar py4j/0.10.9.7//py4j-0.10.9.7.jar remotetea-oncrpc/1.1.2//remotetea-oncrpc-1.1.2.jar rocksdbjni/8.3.2//rocksdbjni-8.3.2.jar -scala-collection-compat_2.12/2.7.0//scala-collection-compat_2.12-2.7.0.jar -scala-compiler/2.12.18//scala-compiler-2.12.18.jar -scala-library/2.12.18//scala-library-2.12.18.jar -scala-parser-combinators_2.12/2.3.0//scala-parser-combinators_2.12-2.3.0.jar -scala-reflect/2.12.18//scala-reflect-2.12.18.jar -scala-xml_2.12/2.2.0//scala-xml_2.12-2.2.0.jar +scala-collection-compat_2.13/2.7.0//scala-collection-compat_2.13-2.7.0.jar +scala-compiler/2.13.11//scala-compiler-2.13.11.jar +scala-library/2.13.11//scala-library-2.13.11.jar +scala-parallel-collections_2.13/1.0.4//scala-parallel-collections_2.13-1.0.4.jar +scala-parser-combinators_2.13/2.3.0//scala-parser-combinators_2.13-2.3.0.jar +scala-reflect/2.13.11//scala-reflect-2.13.11.jar +scala-xml_2.13/2.2.0//scala-xml_2.13-2.2.0.jar shims/0.9.45//shims-0.9.45.jar slf4j-api/2.0.9//slf4j-api-2.0.9.jar snakeyaml-engine/2.6//snakeyaml-engine-2.6.jar snakeyaml/2.0//snakeyaml-2.0.jar snappy-java/1.1.10.3//snappy-java-1.1.10.3.jar -spire-macros_2.12/0.17.0//spire-macros_2.12-0.17.0.jar -spire-platform_2.12/0.17.0//spire-platform_2.12-0.17.0.jar -spire-util_2.12/0.17.0//spire-util_2.12-0.17.0.jar -spire_2.12/0.17.0//spire_2.12-0.17.0.jar +spire-macros_2.13/0.18.0//spire-macros_2.13-0.18.0.jar +spire-platform_2.13/0.18.0//spire-platform_2.13-0.18.0.jar +spire-util_2.13/0.18.0//spire-util_2.13-0.18.0.jar +spire_2.13/0.18.0//spire_2.13-0.18.0.jar stax-api/1.0.1//stax-api-1.0.1.jar stream/2.9.6//stream-2.9.6.jar super-csv/2.2.0//super-csv-2.2.0.jar diff --git a/dev/lint-scala b/dev/lint-scala index 8adac2cecbdb..7937aa68f2e2 100755 --- a/dev/lint-scala +++ b/dev/lint-scala @@ -24,7 +24,7 @@ SPARK_ROOT_DIR="$(dirname $SCRIPT_DIR)" # For Spark Connect, we actively enforce scalafmt and check that the produced diff is empty. 
ERRORS=$(./build/mvn \ - -Pscala-2.12 \ + -Pscala-2.13 \ scalafmt:format \ -Dscalafmt.skip=false \ -Dscalafmt.validateOnly=true \ @@ -38,7 +38,7 @@ ERRORS=$(./build/mvn \ if test ! -z "$ERRORS"; then echo -e "The scalafmt check failed on connector/connect at following occurrences:\n\n$ERRORS\n" echo "Before submitting your change, please make sure to format your code using the following command:" - echo "./build/mvn -Pscala-2.12 scalafmt:format -Dscalafmt.skip=false -Dscalafmt.validateOnly=false -Dscalafmt.changedOnly=false -pl connector/connect/common -pl connector/connect/server -pl connector/connect/client/jvm" + echo "./build/mvn -Pscala-2.13 scalafmt:format -Dscalafmt.skip=false -Dscalafmt.validateOnly=false -Dscalafmt.changedOnly=false -pl connector/connect/common -pl connector/connect/server -pl connector/connect/client/jvm" exit 1 else echo -e "Scalafmt checks passed." diff --git a/dev/mima b/dev/mima index 859301b4d669..17e9abe4bfb9 100755 --- a/dev/mima +++ b/dev/mima @@ -24,9 +24,9 @@ set -e FWDIR="$(cd "`dirname "$0"`"/..; pwd)" cd "$FWDIR" -SPARK_PROFILES=${1:-"-Pmesos -Pkubernetes -Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive"} -TOOLS_CLASSPATH="$(build/sbt -DcopyDependencies=false "export tools/fullClasspath" | grep jar | tail -n1)" -OLD_DEPS_CLASSPATH="$(build/sbt -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | grep jar | tail -n1)" +SPARK_PROFILES=${1:-"-Pscala-2.13 -Pmesos -Pkubernetes -Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive"} +TOOLS_CLASSPATH="$(build/sbt -Pscala-2.13 -DcopyDependencies=false "export tools/fullClasspath" | grep jar | tail -n1)" +OLD_DEPS_CLASSPATH="$(build/sbt -Pscala-2.13 -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | grep jar | tail -n1)" rm -f .generated-mima* @@ -42,7 +42,7 @@ $JAVA_CMD \ -cp "$TOOLS_CLASSPATH:$OLD_DEPS_CLASSPATH" \ org.apache.spark.tools.GenerateMIMAIgnore -echo -e "q\n" | build/sbt -mem 5632 -DcopyDependencies=false "$@" mimaReportBinaryIssues | grep -v -e "info.*Resolving" +echo -e "q\n" | build/sbt -Pscala-2.13 -mem 5632 -DcopyDependencies=false "$@" mimaReportBinaryIssues | grep -v -e "info.*Resolving" ret_val=$? if [ $ret_val != 0 ]; then diff --git a/dev/run-tests.py b/dev/run-tests.py index 559e2017be10..57fe1de811d8 100755 --- a/dev/run-tests.py +++ b/dev/run-tests.py @@ -181,7 +181,6 @@ def get_scala_profiles(scala_version): return [] # assume it's default. sbt_maven_scala_profiles = { - "scala2.12": ["-Pscala-2.12"], "scala2.13": ["-Pscala-2.13"], } diff --git a/dev/scalafmt b/dev/scalafmt index 3971f7a69e72..de9290ec52bd 100755 --- a/dev/scalafmt +++ b/dev/scalafmt @@ -17,6 +17,6 @@ # limitations under the License. 
# -VERSION="${@:-2.12}" +VERSION="${@:-2.13}" ./build/mvn -Pscala-$VERSION scalafmt:format -Dscalafmt.skip=false -Dscalafmt.validateOnly=false diff --git a/dev/test-dependencies.sh b/dev/test-dependencies.sh index d7967ac3afa9..07da6497b97d 100755 --- a/dev/test-dependencies.sh +++ b/dev/test-dependencies.sh @@ -62,7 +62,7 @@ SCALA_BINARY_VERSION=$($MVN -q \ -Dexec.args='${scala.binary.version}' \ --non-recursive \ org.codehaus.mojo:exec-maven-plugin:1.6.0:exec | grep -E '[0-9]+\.[0-9]+') -if [[ "$SCALA_BINARY_VERSION" != "2.12" ]]; then +if [[ "$SCALA_BINARY_VERSION" != "2.13" ]]; then echo "Skip dependency testing on $SCALA_BINARY_VERSION" exit 0 fi diff --git a/docs/_config.yml b/docs/_config.yml index fcc50d22e2e1..80235fb1da57 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -21,8 +21,8 @@ include: # of Spark, Scala, and Mesos. SPARK_VERSION: 4.0.0-SNAPSHOT SPARK_VERSION_SHORT: 4.0.0 -SCALA_BINARY_VERSION: "2.12" -SCALA_VERSION: "2.12.18" +SCALA_BINARY_VERSION: "2.13" +SCALA_VERSION: "2.13.11" MESOS_VERSION: 1.0.0 SPARK_ISSUE_TRACKER_URL: https://issues.apache.org/jira/browse/SPARK SPARK_GITHUB_URL: https://github.com/apache/spark diff --git a/docs/_plugins/copy_api_dirs.rb b/docs/_plugins/copy_api_dirs.rb index 28d5e0d82c93..9cb073ef1e00 100644 --- a/docs/_plugins/copy_api_dirs.rb +++ b/docs/_plugins/copy_api_dirs.rb @@ -26,8 +26,8 @@ curr_dir = pwd cd("..") - puts "Running 'build/sbt -Pkinesis-asl clean compile unidoc' from " + pwd + "; this may take a few minutes..." - system("build/sbt -Pkinesis-asl clean compile unidoc") || raise("Unidoc generation failed") + puts "Running 'build/sbt -Pscala-2.13 -Pkinesis-asl clean compile unidoc' from " + pwd + "; this may take a few minutes..." + system("build/sbt -Pscala-2.13 -Pkinesis-asl clean compile unidoc") || raise("Unidoc generation failed") puts "Moving back into docs dir." cd("docs") @@ -37,7 +37,7 @@ # Copy over the unified ScalaDoc for all projects to api/scala. # This directory will be copied over to _site when `jekyll` command is run. - source = "../target/scala-2.12/unidoc" + source = "../target/scala-2.13/unidoc" dest = "api/scala" puts "Making directory " + dest @@ -119,8 +119,8 @@ puts "Moving to project root and building API docs." cd("..") - puts "Running 'build/sbt clean package -Phive' from " + pwd + "; this may take a few minutes..." - system("build/sbt clean package -Phive") || raise("PySpark doc generation failed") + puts "Running 'build/sbt -Pscala-2.13 clean package -Phive' from " + pwd + "; this may take a few minutes..." + system("build/sbt -Pscala-2.13 clean package -Phive") || raise("PySpark doc generation failed") puts "Moving back into docs dir." cd("docs") @@ -165,8 +165,8 @@ puts "Moving to project root and building API docs." cd("..") - puts "Running 'build/sbt clean package -Phive' from " + pwd + "; this may take a few minutes..." - system("build/sbt clean package -Phive") || raise("SQL doc generation failed") + puts "Running 'build/sbt -Pscala-2.13 clean package -Phive' from " + pwd + "; this may take a few minutes..." + system("build/sbt -Pscala-2.13 clean package -Phive") || raise("SQL doc generation failed") puts "Moving back into docs dir." cd("docs") diff --git a/docs/building-spark.md b/docs/building-spark.md index bbbc51d8c22c..b7d1bacea3cc 100644 --- a/docs/building-spark.md +++ b/docs/building-spark.md @@ -28,7 +28,7 @@ license: | The Maven-based build is the build of reference for Apache Spark. Building Spark using Maven requires Maven 3.9.4 and Java 8/11/17. 
-Spark requires Scala 2.12/2.13; support for Scala 2.11 was removed in Spark 3.0.0. +Spark requires Scala 2.13; support for Scala 2.12 was removed in Spark 4.0.0. ### Setting up Maven's Memory Usage diff --git a/docs/index.md b/docs/index.md index bd77fd75a0b3..4620c4f072b4 100644 --- a/docs/index.md +++ b/docs/index.md @@ -34,7 +34,7 @@ source, visit [Building Spark](building-spark.html). Spark runs on both Windows and UNIX-like systems (e.g. Linux, Mac OS), and it should run on any platform that runs a supported version of Java. This should include JVMs on x86_64 and ARM64. It's easy to run locally on one machine --- all you need is to have `java` installed on your system `PATH`, or the `JAVA_HOME` environment variable pointing to a Java installation. -Spark runs on Java 8/11/17, Scala 2.12/2.13, Python 3.8+, and R 3.5+. +Spark runs on Java 8/11/17, Scala 2.13, Python 3.8+, and R 3.5+. Java 8 prior to version 8u371 support is deprecated as of Spark 3.5.0. When using the Scala API, it is necessary for applications to use the same version of Scala that Spark was compiled for. For example, when using Scala 2.13, use Spark compiled for 2.13, and compile code/applications for Scala 2.13 as well. diff --git a/docs/spark-connect-overview.md b/docs/spark-connect-overview.md index 0673763f03bc..82d84f39ca1d 100644 --- a/docs/spark-connect-overview.md +++ b/docs/spark-connect-overview.md @@ -101,13 +101,13 @@ Spark before and run the `start-connect-server.sh` script to start Spark server Spark Connect, like in this example: {% highlight bash %} -./sbin/start-connect-server.sh --packages org.apache.spark:spark-connect_2.12:{{site.SPARK_VERSION_SHORT}} +./sbin/start-connect-server.sh --packages org.apache.spark:spark-connect_2.13:{{site.SPARK_VERSION_SHORT}} {% endhighlight %} -Note that we include a Spark Connect package (`spark-connect_2.12:{{site.SPARK_VERSION_SHORT}}`), when starting +Note that we include a Spark Connect package (`spark-connect_2.13:{{site.SPARK_VERSION_SHORT}}`), when starting Spark server. This is required to use Spark Connect. Make sure to use the same version of the package as the Spark version you downloaded previously. In this example, -Spark {{site.SPARK_VERSION_SHORT}} with Scala 2.12. +Spark {{site.SPARK_VERSION_SHORT}} with Scala 2.13. Now Spark server is running and ready to accept Spark Connect sessions from client applications. In the next section we will walk through how to use Spark Connect diff --git a/docs/storage-openstack-swift.md b/docs/storage-openstack-swift.md index 73b21a1f7c27..52e2d11b9912 100644 --- a/docs/storage-openstack-swift.md +++ b/docs/storage-openstack-swift.md @@ -44,7 +44,7 @@ For example, for Maven support, add the following to the pom.xml fi ... org.apache.spark - hadoop-cloud_2.12 + hadoop-cloud_2.13 ${spark.version} ... 
diff --git a/examples/pom.xml b/examples/pom.xml index c5644f6a0895..9470f13ecfc2 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-examples_2.12 + spark-examples_2.13 jar Spark Project Examples https://spark.apache.org/ diff --git a/graphx/pom.xml b/graphx/pom.xml index 3771a274082c..ce29c1845422 100644 --- a/graphx/pom.xml +++ b/graphx/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-graphx_2.12 + spark-graphx_2.13 graphx diff --git a/hadoop-cloud/pom.xml b/hadoop-cloud/pom.xml index b27df3597eb1..934ff29be406 100644 --- a/hadoop-cloud/pom.xml +++ b/hadoop-cloud/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-hadoop-cloud_2.12 + spark-hadoop-cloud_2.13 jar Spark Project Hadoop Cloud Integration diff --git a/launcher/pom.xml b/launcher/pom.xml index d87f7bd8fef5..c47244ff887a 100644 --- a/launcher/pom.xml +++ b/launcher/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-launcher_2.12 + spark-launcher_2.13 jar Spark Project Launcher https://spark.apache.org/ diff --git a/mllib-local/pom.xml b/mllib-local/pom.xml index fe8ec5721ea7..408aec1ff276 100644 --- a/mllib-local/pom.xml +++ b/mllib-local/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-mllib-local_2.12 + spark-mllib-local_2.13 mllib-local diff --git a/mllib/pom.xml b/mllib/pom.xml index 0b5292d7c63b..70c116846f4b 100644 --- a/mllib/pom.xml +++ b/mllib/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-mllib_2.12 + spark-mllib_2.13 mllib @@ -91,12 +91,12 @@ test-jar test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.scalanlp breeze_${scala.binary.version} diff --git a/pom.xml b/pom.xml index 971cb07ea40e..65aa656aca2b 100644 --- a/pom.xml +++ b/pom.xml @@ -25,7 +25,7 @@ 18 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT pom Spark Project Parent POM @@ -172,8 +172,8 @@ 3.2.2 4.4 - 2.12.18 - 2.12 + 2.13.11 + 2.13 2.2.0 4.7.1 @@ -440,13 +440,13 @@ ${project.version} test-jar - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} 1.0.4 - --> + com.twitter chill_${scala.binary.version} @@ -1110,7 +1110,7 @@ org.scala-lang.modules - scala-xml_2.12 + scala-xml_2.13 @@ -2819,6 +2819,7 @@ org.jboss.netty org.codehaus.groovy + *:*_2.12 *:*_2.11 *:*_2.10 @@ -2934,9 +2935,53 @@ -feature -explaintypes -target:jvm-1.8 - -Xfatal-warnings - -Ywarn-unused:imports - -P:silencer:globalFilters=.*deprecated.* + -Wconf:cat=deprecation:wv,any:e + -Wunused:imports + + -Wconf:cat=scaladoc:wv + -Wconf:cat=lint-multiarg-infix:wv + -Wconf:cat=other-nullary-override:wv + -Wconf:cat=other-match-analysis&site=org.apache.spark.sql.catalyst.catalog.SessionCatalog.lookupFunction.catalogFunction:wv + -Wconf:cat=other-pure-statement&site=org.apache.spark.streaming.util.FileBasedWriteAheadLog.readAll.readFile:wv + -Wconf:cat=other-pure-statement&site=org.apache.spark.scheduler.OutputCommitCoordinatorSuite.<local OutputCommitCoordinatorSuite>.futureAction:wv + + -Wconf:msg=^(?=.*?method|value|type|object|trait|inheritance)(?=.*?deprecated)(?=.*?since 2.13).+$:s + -Wconf:msg=^(?=.*?Widening 
conversion from)(?=.*?is deprecated because it loses precision).+$:s + -Wconf:msg=Auto-application to \`\(\)\` is deprecated:s + -Wconf:msg=method with a single empty parameter list overrides method without any parameter list:s + -Wconf:msg=method without a parameter list overrides a method with a single empty one:s + + -Wconf:cat=deprecation&msg=procedure syntax is deprecated:e + + -Wconf:cat=unchecked&msg=outer reference:s + -Wconf:cat=unchecked&msg=eliminated by erasure:s + -Wconf:msg=^(?=.*?a value of type)(?=.*?cannot also be).+$:s + + -Wconf:cat=unused-imports&src=org\/apache\/spark\/graphx\/impl\/VertexPartitionBase.scala:s + -Wconf:cat=unused-imports&src=org\/apache\/spark\/graphx\/impl\/VertexPartitionBaseOps.scala:s + + -Wconf:msg=Implicit definition should have explicit type:s -Xss128m @@ -2952,13 +2997,6 @@ ${java.version} -Xlint:all,-serial,-path,-try - - - com.github.ghik - silencer-plugin_${scala.version} - 1.7.13 - - @@ -3611,97 +3649,20 @@ - scala-2.12 + scala-2.13 - 2.12.18 - - - - - - - - - - - scala-2.13 - 2.13.11 - 2.13 - - net.alchim31.maven - scala-maven-plugin - - - -unchecked - -deprecation - -feature - -explaintypes - -target:jvm-1.8 - -Wconf:cat=deprecation:wv,any:e - -Wunused:imports - - -Wconf:cat=scaladoc:wv - -Wconf:cat=lint-multiarg-infix:wv - -Wconf:cat=other-nullary-override:wv - -Wconf:cat=other-match-analysis&site=org.apache.spark.sql.catalyst.catalog.SessionCatalog.lookupFunction.catalogFunction:wv - -Wconf:cat=other-pure-statement&site=org.apache.spark.streaming.util.FileBasedWriteAheadLog.readAll.readFile:wv - -Wconf:cat=other-pure-statement&site=org.apache.spark.scheduler.OutputCommitCoordinatorSuite.<local OutputCommitCoordinatorSuite>.futureAction:wv - - -Wconf:msg=^(?=.*?method|value|type|object|trait|inheritance)(?=.*?deprecated)(?=.*?since 2.13).+$:s - -Wconf:msg=^(?=.*?Widening conversion from)(?=.*?is deprecated because it loses precision).+$:s - -Wconf:msg=Auto-application to \`\(\)\` is deprecated:s - -Wconf:msg=method with a single empty parameter list overrides method without any parameter list:s - -Wconf:msg=method without a parameter list overrides a method with a single empty one:s - - -Wconf:cat=deprecation&msg=procedure syntax is deprecated:e - - -Wconf:cat=unchecked&msg=outer reference:s - -Wconf:cat=unchecked&msg=eliminated by erasure:s - -Wconf:msg=^(?=.*?a value of type)(?=.*?cannot also be).+$:s - - -Wconf:cat=unused-imports&src=org\/apache\/spark\/graphx\/impl\/VertexPartitionBase.scala:s - -Wconf:cat=unused-imports&src=org\/apache\/spark\/graphx\/impl\/VertexPartitionBaseOps.scala:s - - -Wconf:msg=Implicit definition should have explicit type:s - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-scala-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - - - add-scala-test-sources - generate-test-sources - - add-test-source - - - - src/test/scala-${scala.binary.version} - - - - - - diff --git a/repl/src/main/scala-2.12/org/apache/spark/repl/Main.scala b/repl/src/main/scala-2.12/org/apache/spark/repl/Main.scala deleted file mode 100644 index eaca4ad6ee29..000000000000 --- a/repl/src/main/scala-2.12/org/apache/spark/repl/Main.scala +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.repl - -import java.io.File -import java.net.URI -import java.util.Locale - -import scala.tools.nsc.GenericRunnerSettings - -import org.apache.spark._ -import org.apache.spark.internal.Logging -import org.apache.spark.sql.SparkSession -import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION -import org.apache.spark.util.Utils - -object Main extends Logging { - - initializeLogIfNecessary(true) - Signaling.cancelOnInterrupt() - - val conf = new SparkConf() - val rootDir = conf.getOption("spark.repl.classdir").getOrElse(Utils.getLocalDir(conf)) - val outputDir = Utils.createTempDir(root = rootDir, namePrefix = "repl") - - var sparkContext: SparkContext = _ - var sparkSession: SparkSession = _ - // this is a public var because tests reset it. - var interp: SparkILoop = _ - - private var hasErrors = false - private var isShellSession = false - - private def scalaOptionError(msg: String): Unit = { - hasErrors = true - // scalastyle:off println - Console.err.println(msg) - // scalastyle:on println - } - - def main(args: Array[String]): Unit = { - isShellSession = true - doMain(args, new SparkILoop) - } - - // Visible for testing - private[repl] def doMain(args: Array[String], _interp: SparkILoop): Unit = { - interp = _interp - val jars = Utils.getLocalUserJarsForShell(conf) - // Remove file:///, file:// or file:/ scheme if exists for each jar - .map { x => if (x.startsWith("file:")) new File(new URI(x)).getPath else x } - .mkString(File.pathSeparator) - val interpArguments = List( - "-Yrepl-class-based", - "-Yrepl-outdir", s"${outputDir.getAbsolutePath}", - "-classpath", jars - ) ++ args.toList - - val settings = new GenericRunnerSettings(scalaOptionError) - settings.processArguments(interpArguments, true) - - if (!hasErrors) { - interp.process(settings) // Repl starts and goes in loop of R.E.P.L - Option(sparkContext).foreach(_.stop) - } - } - - def createSparkSession(): SparkSession = { - try { - val execUri = System.getenv("SPARK_EXECUTOR_URI") - conf.setIfMissing("spark.app.name", "Spark shell") - // SparkContext will detect this configuration and register it with the RpcEnv's - // file server, setting spark.repl.class.uri to the actual URI for executors to - // use. This is sort of ugly but since executors are started as part of SparkContext - // initialization in certain cases, there's an initialization order issue that prevents - // this from being set after SparkContext is instantiated. 
- conf.set("spark.repl.class.outputDir", outputDir.getAbsolutePath()) - if (execUri != null) { - conf.set("spark.executor.uri", execUri) - } - if (System.getenv("SPARK_HOME") != null) { - conf.setSparkHome(System.getenv("SPARK_HOME")) - } - - val builder = SparkSession.builder.config(conf) - if (conf.get(CATALOG_IMPLEMENTATION.key, "hive").toLowerCase(Locale.ROOT) == "hive") { - if (SparkSession.hiveClassesArePresent) { - // In the case that the property is not set at all, builder's config - // does not have this value set to 'hive' yet. The original default - // behavior is that when there are hive classes, we use hive catalog. - sparkSession = builder.enableHiveSupport().getOrCreate() - logInfo("Created Spark session with Hive support") - } else { - // Need to change it back to 'in-memory' if no hive classes are found - // in the case that the property is set to hive in spark-defaults.conf - builder.config(CATALOG_IMPLEMENTATION.key, "in-memory") - sparkSession = builder.getOrCreate() - logInfo("Created Spark session") - } - } else { - // In the case that the property is set but not to 'hive', the internal - // default is 'in-memory'. So the sparkSession will use in-memory catalog. - sparkSession = builder.getOrCreate() - logInfo("Created Spark session") - } - sparkContext = sparkSession.sparkContext - sparkSession - } catch { - case e: ClassNotFoundException if isShellSession && e.getMessage.contains( - "org.apache.spark.sql.connect.SparkConnectPlugin") => - logError("Failed to load spark connect plugin.") - logError("You need to build Spark with -Pconnect.") - sys.exit(1) - case e: Exception if isShellSession => - logError("Failed to initialize Spark session.", e) - sys.exit(1) - } - } - -} diff --git a/repl/src/main/scala-2.12/org/apache/spark/repl/SparkILoop.scala b/repl/src/main/scala-2.12/org/apache/spark/repl/SparkILoop.scala deleted file mode 100644 index 92984ed45f82..000000000000 --- a/repl/src/main/scala-2.12/org/apache/spark/repl/SparkILoop.scala +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.repl - -import java.io.BufferedReader - -// scalastyle:off println -import scala.Predef.{println => _, _} -// scalastyle:on println -import scala.concurrent.Future -import scala.reflect.classTag -import scala.reflect.io.File -import scala.tools.nsc.{GenericRunnerSettings, Properties} -import scala.tools.nsc.Settings -import scala.tools.nsc.interpreter.{isReplDebug, isReplPower, replProps} -import scala.tools.nsc.interpreter.{AbstractOrMissingHandler, ILoop, IMain, JPrintWriter} -import scala.tools.nsc.interpreter.{NamedParam, SimpleReader, SplashLoop, SplashReader} -import scala.tools.nsc.interpreter.StdReplTags.tagOfIMain -import scala.tools.nsc.util.stringFromStream -import scala.util.Properties.{javaVersion, javaVmName, versionString} - -/** - * A Spark-specific interactive shell. - */ -class SparkILoop(in0: Option[BufferedReader], out: JPrintWriter) - extends ILoop(in0, out) { - def this(in0: BufferedReader, out: JPrintWriter) = this(Some(in0), out) - def this() = this(None, new JPrintWriter(Console.out, true)) - - val initializationCommands: Seq[String] = Seq( - """ - @transient val spark = if (org.apache.spark.repl.Main.sparkSession != null) { - org.apache.spark.repl.Main.sparkSession - } else { - org.apache.spark.repl.Main.createSparkSession() - } - @transient val sc = { - val _sc = spark.sparkContext - if (_sc.getConf.getBoolean("spark.ui.reverseProxy", false)) { - val proxyUrl = _sc.getConf.get("spark.ui.reverseProxyUrl", null) - if (proxyUrl != null) { - println( - s"Spark Context Web UI is available at ${proxyUrl}/proxy/${_sc.applicationId}") - } else { - println(s"Spark Context Web UI is available at Spark Master Public URL") - } - } else { - _sc.uiWebUrl.foreach { - webUrl => println(s"Spark context Web UI available at ${webUrl}") - } - } - println("Spark context available as 'sc' " + - s"(master = ${_sc.master}, app id = ${_sc.applicationId}).") - println("Spark session available as 'spark'.") - _sc - } - """, - "import org.apache.spark.SparkContext._", - "import spark.implicits._", - "import spark.sql", - "import org.apache.spark.sql.functions._" - ) - - def initializeSpark(): Unit = { - if (!intp.reporter.hasErrors) { - // `savingReplayStack` removes the commands from session history. - savingReplayStack { - initializationCommands.foreach(intp quietRun _) - } - } else { - throw new RuntimeException(s"Scala $versionString interpreter encountered " + - "errors during initialization") - } - } - - /** Print a welcome message */ - override def printWelcome(): Unit = { - import org.apache.spark.SPARK_VERSION - echo("""Welcome to - ____ __ - / __/__ ___ _____/ /__ - _\ \/ _ \/ _ `/ __/ '_/ - /___/ .__/\_,_/_/ /_/\_\ version %s - /_/ - """.format(SPARK_VERSION)) - val welcomeMsg = "Using Scala %s (%s, Java %s)".format( - versionString, javaVmName, javaVersion) - echo(welcomeMsg) - echo("Type in expressions to have them evaluated.") - echo("Type :help for more information.") - } - - /** Available commands */ - override def commands: List[LoopCommand] = standardCommands - - override def resetCommand(line: String): Unit = { - super.resetCommand(line) - initializeSpark() - echo("Note that after :reset, state of SparkSession and SparkContext is unchanged.") - } - - override def replay(): Unit = { - initializeSpark() - super.replay() - } - - /** - * The following code is mostly a copy of `process` implementation in `ILoop.scala` in Scala - * - * In newer version of Scala, `printWelcome` is the first thing to be called. 
As a result, - * SparkUI URL information would be always shown after the welcome message. - * - * However, this is inconsistent compared with the existing version of Spark which will always - * show SparkUI URL first. - * - * The only way we can make it consistent will be duplicating the Scala code. - * - * We should remove this duplication once Scala provides a way to load our custom initialization - * code, and also customize the ordering of printing welcome message. - */ - override def process(settings: Settings): Boolean = { - - def newReader = in0.fold(chooseReader(settings))(r => SimpleReader(r, out, interactive = true)) - - /** Reader to use before interpreter is online. */ - def preLoop = { - val sr = SplashReader(newReader) { r => - in = r - in.postInit() - } - in = sr - SplashLoop(sr, prompt) - } - - /* Actions to cram in parallel while collecting first user input at prompt. - * Run with output muted both from ILoop and from the intp reporter. - */ - def loopPostInit(): Unit = mumly { - // Bind intp somewhere out of the regular namespace where - // we can get at it in generated code. - intp.quietBind(NamedParam[IMain]("$intp", intp)(tagOfIMain, classTag[IMain])) - - // Auto-run code via some setting. - ( replProps.replAutorunCode.option - flatMap (f => File(f).safeSlurp()) - foreach (intp quietRun _) - ) - // power mode setup - if (isReplPower) enablePowerMode(true) - initializeSpark() - loadInitFiles() - // SI-7418 Now, and only now, can we enable TAB completion. - in.postInit() - } - def loadInitFiles(): Unit = settings match { - case settings: GenericRunnerSettings => - for (f <- settings.loadfiles.value) { - loadCommand(f) - addReplay(s":load $f") - } - for (f <- settings.pastefiles.value) { - pasteCommand(f) - addReplay(s":paste $f") - } - case _ => - } - // wait until after startup to enable noisy settings - def withSuppressedSettings[A](body: => A): A = { - val ss = this.settings - import ss._ - val noisy = List(Xprint, Ytyperdebug) - val noisesome = noisy.exists(!_.isDefault) - val current = (Xprint.value, Ytyperdebug.value) - if (isReplDebug || !noisesome) body - else { - this.settings.Xprint.value = List.empty - this.settings.Ytyperdebug.value = false - try body - finally { - Xprint.value = current._1 - Ytyperdebug.value = current._2 - intp.global.printTypings = current._2 - } - } - } - def startup(): String = withSuppressedSettings { - // let them start typing - val splash = preLoop - - // while we go fire up the REPL - try { - // don't allow ancient sbt to hijack the reader - savingReader { - createInterpreter() - } - intp.initializeSynchronous() - - val field = classOf[ILoop].getDeclaredFields.filter(_.getName.contains("globalFuture")).head - field.setAccessible(true) - field.set(this, Future successful true) - - if (intp.reporter.hasErrors) { - echo("Interpreter encountered errors during initialization!") - null - } else { - loopPostInit() - printWelcome() - splash.start() - - val line = splash.line // what they typed in while they were waiting - if (line == null) { // they ^D - try out print Properties.shellInterruptedString - finally closeInterpreter() - } - line - } - } finally splash.stop() - } - - this.settings = settings - startup() match { - case null => false - case line => - try loop(line) match { - case LineResults.EOF => out print Properties.shellInterruptedString - case _ => - } - catch AbstractOrMissingHandler() - finally closeInterpreter() - true - } - } -} - -object SparkILoop { - - /** - * Creates an interpreter loop with default settings and feeds - 
* the given code to it as input. - */ - def run(code: String, sets: Settings = new Settings): String = { - import java.io.{ BufferedReader, StringReader, OutputStreamWriter } - - stringFromStream { ostream => - Console.withOut(ostream) { - val input = new BufferedReader(new StringReader(code)) - val output = new JPrintWriter(new OutputStreamWriter(ostream), true) - val repl = new SparkILoop(input, output) - - if (sets.classpath.isDefault) { - sets.classpath.value = sys.props("java.class.path") - } - repl process sets - } - } - } - def run(lines: List[String]): String = run(lines.map(_ + "\n").mkString) -} diff --git a/repl/src/main/scala-2.13/org/apache/spark/repl/Main.scala b/repl/src/main/scala/org/apache/spark/repl/Main.scala similarity index 100% rename from repl/src/main/scala-2.13/org/apache/spark/repl/Main.scala rename to repl/src/main/scala/org/apache/spark/repl/Main.scala diff --git a/repl/src/main/scala-2.13/org/apache/spark/repl/SparkILoop.scala b/repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala similarity index 100% rename from repl/src/main/scala-2.13/org/apache/spark/repl/SparkILoop.scala rename to repl/src/main/scala/org/apache/spark/repl/SparkILoop.scala diff --git a/repl/src/test/scala-2.12/org/apache/spark/repl/Repl2Suite.scala b/repl/src/test/scala-2.12/org/apache/spark/repl/Repl2Suite.scala deleted file mode 100644 index 15b45ad797ef..000000000000 --- a/repl/src/test/scala-2.12/org/apache/spark/repl/Repl2Suite.scala +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.repl - -import java.io._ - -import scala.tools.nsc.interpreter.SimpleReader - -import org.apache.spark.{SparkContext, SparkFunSuite} - -class Repl2Suite extends SparkFunSuite { - test("propagation of local properties") { - // A mock ILoop that doesn't install the SIGINT handler. - class ILoop(out: PrintWriter) extends SparkILoop(None, out) { - settings = new scala.tools.nsc.Settings - settings.usejavacp.value = true - org.apache.spark.repl.Main.interp = this - in = SimpleReader() - } - - val out = new StringWriter() - Main.interp = new ILoop(new PrintWriter(out)) - Main.sparkContext = new SparkContext("local", "repl-test") - Main.interp.createInterpreter() - - Main.sparkContext.setLocalProperty("someKey", "someValue") - - // Make sure the value we set in the caller to interpret is propagated in the thread that - // interprets the command. 
- Main.interp.interpret("org.apache.spark.repl.Main.sparkContext.getLocalProperty(\"someKey\")") - assert(out.toString.contains("someValue")) - - Main.sparkContext.stop() - System.clearProperty("spark.driver.port") - } -} diff --git a/repl/src/test/scala-2.12/org/apache/spark/repl/SingletonRepl2Suite.scala b/repl/src/test/scala-2.12/org/apache/spark/repl/SingletonRepl2Suite.scala deleted file mode 100644 index a4eff392a2c9..000000000000 --- a/repl/src/test/scala-2.12/org/apache/spark/repl/SingletonRepl2Suite.scala +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.repl - -import java.io._ - -import org.apache.spark.SparkFunSuite - -/** - * A special test suite for REPL that all test cases share one REPL instance. - */ -class SingletonRepl2Suite extends SparkFunSuite { - private val out = new StringWriter() - private val in = new PipedOutputStream() - private var thread: Thread = _ - - private val CONF_EXECUTOR_CLASSPATH = "spark.executor.extraClassPath" - private val oldExecutorClasspath = System.getProperty(CONF_EXECUTOR_CLASSPATH) - - override def beforeAll(): Unit = { - super.beforeAll() - - val classpath = System.getProperty("java.class.path") - System.setProperty(CONF_EXECUTOR_CLASSPATH, classpath) - - Main.conf.set("spark.master", "local-cluster[2,1,1024]") - val interp = new SparkILoop( - new BufferedReader(new InputStreamReader(new PipedInputStream(in))), - new PrintWriter(out)) - - // Forces to create new SparkContext - Main.sparkContext = null - Main.sparkSession = null - - // Starts a new thread to run the REPL interpreter, so that we won't block. - thread = new Thread(() => Main.doMain(Array("-classpath", classpath), interp)) - thread.setDaemon(true) - thread.start() - - waitUntil(() => out.toString.contains("Type :help for more information")) - } - - override def afterAll(): Unit = { - in.close() - thread.join() - if (oldExecutorClasspath != null) { - System.setProperty(CONF_EXECUTOR_CLASSPATH, oldExecutorClasspath) - } else { - System.clearProperty(CONF_EXECUTOR_CLASSPATH) - } - super.afterAll() - } - - private def waitUntil(cond: () => Boolean): Unit = { - import scala.concurrent.duration._ - import org.scalatest.concurrent.Eventually._ - - eventually(timeout(50.seconds), interval(500.millis)) { - assert(cond(), "current output: " + out.toString) - } - } - - /** - * Run the given commands string in a globally shared interpreter instance. Note that the given - * commands should not crash the interpreter, to not affect other test cases. 
- */ - def runInterpreter(input: String): String = { - val currentOffset = out.getBuffer.length() - // append a special statement to the end of the given code, so that we can know what's - // the final output of this code snippet and rely on it to wait until the output is ready. - val timestamp = System.currentTimeMillis() - in.write((input + s"\nval _result_$timestamp = 1\n").getBytes) - in.flush() - val stopMessage = s"_result_$timestamp: Int = 1" - waitUntil(() => out.getBuffer.substring(currentOffset).contains(stopMessage)) - out.getBuffer.substring(currentOffset) - } - - def assertContains(message: String, output: String): Unit = { - val isContain = output.contains(message) - assert(isContain, - "Interpreter output did not contain '" + message + "':\n" + output) - } - - def assertDoesNotContain(message: String, output: String): Unit = { - val isContain = output.contains(message) - assert(!isContain, - "Interpreter output contained '" + message + "':\n" + output) - } - - test("SPARK-31399: should clone+clean line object w/ non-serializable state in ClosureCleaner") { - // Test ClosureCleaner when a closure captures the enclosing `this` REPL line object, and that - // object contains an unused non-serializable field. - // Specifically, the closure in this test case contains a directly nested closure, and the - // capture is triggered by the inner closure. - // `ns` should be nulled out, but `topLevelValue` should stay intact. - - // Can't use :paste mode because PipedOutputStream/PipedInputStream doesn't work well with the - // EOT control character (i.e. Ctrl+D). - // Just write things on a single line to emulate :paste mode. - - // NOTE: in order for this test case to trigger the intended scenario, the following three - // variables need to be in the same "input", which will make the REPL pack them into the - // same REPL line object: - // - ns: a non-serializable state, not accessed by the closure; - // - topLevelValue: a serializable state, accessed by the closure; - // - closure: the starting closure, captures the enclosing REPL line object. - val output = runInterpreter( - """ - |class NotSerializableClass(val x: Int) - |val ns = new NotSerializableClass(42); val topLevelValue = "someValue"; val closure = - |(j: Int) => { - | (1 to j).flatMap { x => - | (1 to x).map { y => y + topLevelValue } - | } - |} - |val r = sc.parallelize(0 to 2).map(closure).collect - """.stripMargin) - assertContains("r: Array[scala.collection.immutable.IndexedSeq[String]] = " + - "Array(Vector(), Vector(1someValue), Vector(1someValue, 1someValue, 2someValue))", output) -// assertContains("r: Array[IndexedSeq[String]] = " + -// "Array(Vector(), Vector(1someValue), Vector(1someValue, 1someValue, 2someValue))", output) - assertDoesNotContain("Exception", output) - } - - test("SPARK-31399: ClosureCleaner should discover indirectly nested closure in inner class") { - // Similar to the previous test case, but with indirect closure nesting instead. - // There's still nested closures involved, but the inner closure is indirectly nested in the - // outer closure, with a level of inner class in between them. - // This changes how the inner closure references/captures the outer closure/enclosing `this` - // REPL line object, and covers a different code path in inner closure discovery. - - // `ns` should be nulled out, but `topLevelValue` should stay intact. 
- - val output = runInterpreter( - """ - |class NotSerializableClass(val x: Int) - |val ns = new NotSerializableClass(42); val topLevelValue = "someValue"; val closure = - |(j: Int) => { - | class InnerFoo { - | val innerClosure = (x: Int) => (1 to x).map { y => y + topLevelValue } - | } - | val innerFoo = new InnerFoo - | (1 to j).flatMap(innerFoo.innerClosure) - |} - |val r = sc.parallelize(0 to 2).map(closure).collect - """.stripMargin) - assertContains("r: Array[scala.collection.immutable.IndexedSeq[String]] = " + - "Array(Vector(), Vector(1someValue), Vector(1someValue, 1someValue, 2someValue))", output) -// assertContains("r: Array[IndexedSeq[String]] = " + -// "Array(Vector(), Vector(1someValue), Vector(1someValue, 1someValue, 2someValue))", output) - assertDoesNotContain("Array(Vector(), Vector(1null), Vector(1null, 1null, 2null)", output) - assertDoesNotContain("Exception", output) - } - - } diff --git a/repl/src/test/scala-2.13/org/apache/spark/repl/Repl2Suite.scala b/repl/src/test/scala/org/apache/spark/repl/Repl2Suite.scala similarity index 100% rename from repl/src/test/scala-2.13/org/apache/spark/repl/Repl2Suite.scala rename to repl/src/test/scala/org/apache/spark/repl/Repl2Suite.scala diff --git a/repl/src/test/scala-2.13/org/apache/spark/repl/SingletonRepl2Suite.scala b/repl/src/test/scala/org/apache/spark/repl/SingletonRepl2Suite.scala similarity index 100% rename from repl/src/test/scala-2.13/org/apache/spark/repl/SingletonRepl2Suite.scala rename to repl/src/test/scala/org/apache/spark/repl/SingletonRepl2Suite.scala diff --git a/resource-managers/kubernetes/core/pom.xml b/resource-managers/kubernetes/core/pom.xml index e95909288c37..f260b8f07ce5 100644 --- a/resource-managers/kubernetes/core/pom.xml +++ b/resource-managers/kubernetes/core/pom.xml @@ -19,12 +19,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../../pom.xml - spark-kubernetes_2.12 + spark-kubernetes_2.13 jar Spark Project Kubernetes diff --git a/resource-managers/kubernetes/integration-tests/README.md b/resource-managers/kubernetes/integration-tests/README.md index 909e5b652d44..b6d39b4211e8 100644 --- a/resource-managers/kubernetes/integration-tests/README.md +++ b/resource-managers/kubernetes/integration-tests/README.md @@ -127,7 +127,7 @@ configuration is provided in `dev/spark-rbac.yaml`. If you prefer to run just the integration tests directly, then you can customise the behaviour via passing system properties to Maven. 
For example: - mvn integration-test -am -pl :spark-kubernetes-integration-tests_2.12 \ + mvn integration-test -am -pl :spark-kubernetes-integration-tests_2.13 \ -Pkubernetes -Pkubernetes-integration-tests \ -Phadoop-3 -Dhadoop.version=3.3.6 \ -Dspark.kubernetes.test.sparkTgz=spark-3.0.0-SNAPSHOT-bin-example.tgz \ diff --git a/resource-managers/kubernetes/integration-tests/pom.xml b/resource-managers/kubernetes/integration-tests/pom.xml index d1be2dae066f..c5f55c52d0b6 100644 --- a/resource-managers/kubernetes/integration-tests/pom.xml +++ b/resource-managers/kubernetes/integration-tests/pom.xml @@ -19,12 +19,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../../pom.xml - spark-kubernetes-integration-tests_2.12 + spark-kubernetes-integration-tests_2.13 kubernetes-integration-tests diff --git a/resource-managers/mesos/pom.xml b/resource-managers/mesos/pom.xml index 29c341f8c352..da0f31996350 100644 --- a/resource-managers/mesos/pom.xml +++ b/resource-managers/mesos/pom.xml @@ -19,12 +19,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-mesos_2.12 + spark-mesos_2.13 jar Spark Project Mesos diff --git a/resource-managers/yarn/pom.xml b/resource-managers/yarn/pom.xml index e58ab1ea2505..073661e9ac63 100644 --- a/resource-managers/yarn/pom.xml +++ b/resource-managers/yarn/pom.xml @@ -19,12 +19,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-yarn_2.12 + spark-yarn_2.13 jar Spark Project YARN diff --git a/sql/api/pom.xml b/sql/api/pom.xml index 93c47c968e74..bcf01bbe0cd9 100644 --- a/sql/api/pom.xml +++ b/sql/api/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-sql-api_2.12 + spark-sql-api_2.13 jar Spark Project SQL API https://spark.apache.org/ @@ -81,24 +81,6 @@ target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes - - org.codehaus.mojo - build-helper-maven-plugin - - - add-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - - - org.antlr antlr4-maven-plugin diff --git a/sql/api/src/main/scala-2.12/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala b/sql/api/src/main/scala-2.12/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala deleted file mode 100644 index 14b8f620017f..000000000000 --- a/sql/api/src/main/scala-2.12/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.catalyst.util - -import java.util.Locale - -/** - * Builds a map in which keys are case insensitive. 
Input map can be accessed for cases where - * case-sensitive information is required. The primary constructor is marked private to avoid - * nested case-insensitive map creation, otherwise the keys in the original map will become - * case-insensitive in this scenario. - * Note: CaseInsensitiveMap is serializable. However, after transformation, e.g. `filterKeys()`, - * it may become not serializable. - */ -class CaseInsensitiveMap[T] private (val originalMap: Map[String, T]) extends Map[String, T] - with Serializable { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. - - val keyLowerCasedMap = originalMap.map(kv => kv.copy(_1 = kv._1.toLowerCase(Locale.ROOT))) - - override def get(k: String): Option[T] = keyLowerCasedMap.get(k.toLowerCase(Locale.ROOT)) - - override def contains(k: String): Boolean = - keyLowerCasedMap.contains(k.toLowerCase(Locale.ROOT)) - - override def +[B1 >: T](kv: (String, B1)): CaseInsensitiveMap[B1] = { - new CaseInsensitiveMap(originalMap.filter(!_._1.equalsIgnoreCase(kv._1)) + kv) - } - - def ++(xs: TraversableOnce[(String, T)]): CaseInsensitiveMap[T] = { - xs.foldLeft(this)(_ + _) - } - - override def iterator: Iterator[(String, T)] = keyLowerCasedMap.iterator - - override def -(key: String): Map[String, T] = { - new CaseInsensitiveMap(originalMap.filter(!_._1.equalsIgnoreCase(key))) - } - - def toMap: Map[String, T] = originalMap -} - -object CaseInsensitiveMap { - def apply[T](params: Map[String, T]): CaseInsensitiveMap[T] = params match { - case caseSensitiveMap: CaseInsensitiveMap[T] => caseSensitiveMap - case _ => new CaseInsensitiveMap(params) - } -} - diff --git a/sql/api/src/main/scala-2.13/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala b/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala similarity index 100% rename from sql/api/src/main/scala-2.13/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala rename to sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/CaseInsensitiveMap.scala diff --git a/sql/catalyst/pom.xml b/sql/catalyst/pom.xml index 7feeb1581435..48702f69a8a5 100644 --- a/sql/catalyst/pom.xml +++ b/sql/catalyst/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-catalyst_2.12 + spark-catalyst_2.13 jar Spark Project Catalyst https://spark.apache.org/ @@ -83,12 +83,12 @@ spark-sketch_${scala.binary.version} ${project.version} - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.scalacheck scalacheck_${scala.binary.version} @@ -154,24 +154,6 @@ -ea -Xmx4g -Xss4m -XX:ReservedCodeCacheSize=${CodeCacheSize} ${extraJavaTestArgs} -Dio.netty.tryReflectionSetAccessible=true - - org.codehaus.mojo - build-helper-maven-plugin - - - add-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - - - diff --git a/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala b/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala deleted file mode 100644 index 504b65e3db69..000000000000 --- a/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.catalyst.expressions - -/** - * Builds a map that is keyed by an Attribute's expression id. Using the expression id allows values - * to be looked up even when the attributes used differ cosmetically (i.e., the capitalization - * of the name, or the expected nullability). - */ -object AttributeMap { - def apply[A](kvs: Map[Attribute, A]): AttributeMap[A] = { - new AttributeMap(kvs.map(kv => (kv._1.exprId, kv))) - } - - def apply[A](kvs: Seq[(Attribute, A)]): AttributeMap[A] = { - new AttributeMap(kvs.map(kv => (kv._1.exprId, kv)).toMap) - } - - def apply[A](kvs: Iterable[(Attribute, A)]): AttributeMap[A] = { - new AttributeMap(kvs.map(kv => (kv._1.exprId, kv)).toMap) - } - - def empty[A]: AttributeMap[A] = new AttributeMap(Map.empty) -} - -class AttributeMap[A](val baseMap: Map[ExprId, (Attribute, A)]) - extends Map[Attribute, A] with Serializable { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. - - override def get(k: Attribute): Option[A] = baseMap.get(k.exprId).map(_._2) - - override def getOrElse[B1 >: A](k: Attribute, default: => B1): B1 = get(k).getOrElse(default) - - override def contains(k: Attribute): Boolean = get(k).isDefined - - override def + [B1 >: A](kv: (Attribute, B1)): AttributeMap[B1] = - AttributeMap(baseMap.values.toMap + kv) - - override def iterator: Iterator[(Attribute, A)] = baseMap.valuesIterator - - override def -(key: Attribute): Map[Attribute, A] = baseMap.values.toMap - key - - def ++(other: AttributeMap[A]): AttributeMap[A] = new AttributeMap(baseMap ++ other.baseMap) -} diff --git a/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala b/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala deleted file mode 100644 index 3e545f745bae..000000000000 --- a/sql/catalyst/src/main/scala-2.12/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.sql.catalyst.expressions - -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer - -object ExpressionSet { - /** Constructs a new [[ExpressionSet]] by applying [[Canonicalize]] to `expressions`. */ - def apply(expressions: TraversableOnce[Expression]): ExpressionSet = { - val set = new ExpressionSet() - expressions.foreach(set.add) - set - } - - def apply(): ExpressionSet = { - new ExpressionSet() - } -} - -/** - * A [[Set]] where membership is determined based on determinacy and a canonical representation of - * an [[Expression]] (i.e. one that attempts to ignore cosmetic differences). - * See [[Canonicalize]] for more details. - * - * Internally this set uses the canonical representation, but keeps also track of the original - * expressions to ease debugging. Since different expressions can share the same canonical - * representation, this means that operations that extract expressions from this set are only - * guaranteed to see at least one such expression. For example: - * - * {{{ - * val set = ExpressionSet(a + 1, 1 + a) - * - * set.iterator => Iterator(a + 1) - * set.contains(a + 1) => true - * set.contains(1 + a) => true - * set.contains(a + 2) => false - * }}} - * - * For non-deterministic expressions, they are always considered as not contained in the [[Set]]. - * On adding a non-deterministic expression, simply append it to the original expressions. - * This is consistent with how we define `semanticEquals` between two expressions. - * - * The constructor of this class is protected so caller can only initialize an Expression from - * empty, then build it using `add` and `remove` methods. So every instance of this class holds the - * invariant that: - * 1. Every expr `e` in `baseSet` satisfies `e.deterministic && e.canonicalized == e` - * 2. Every deterministic expr `e` in `originals` satisfies that `e.canonicalized` is already - * accessed. - */ -class ExpressionSet protected( - private val baseSet: mutable.Set[Expression] = new mutable.HashSet, - private var originals: mutable.Buffer[Expression] = new ArrayBuffer) - extends scala.collection.Set[Expression] - with scala.collection.SetLike[Expression, ExpressionSet] { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. 
- override def empty: ExpressionSet = new ExpressionSet() - - protected def add(e: Expression): Unit = { - if (!e.deterministic) { - originals += e - } else if (!baseSet.contains(e.canonicalized)) { - baseSet.add(e.canonicalized) - originals += e - } - } - - protected def remove(e: Expression): Unit = { - if (e.deterministic) { - baseSet.remove(e.canonicalized) - originals = originals.filter(!_.semanticEquals(e)) - } - } - - override def contains(elem: Expression): Boolean = baseSet.contains(elem.canonicalized) - - override def filter(p: Expression => Boolean): ExpressionSet = { - val newBaseSet = baseSet.filter(e => p(e)) - val newOriginals = originals.filter(e => p(e.canonicalized)) - new ExpressionSet(newBaseSet, newOriginals) - } - - override def filterNot(p: Expression => Boolean): ExpressionSet = { - val newBaseSet = baseSet.filterNot(e => p(e)) - val newOriginals = originals.filterNot(e => p(e.canonicalized)) - new ExpressionSet(newBaseSet, newOriginals) - } - - override def +(elem: Expression): ExpressionSet = { - val newSet = clone() - newSet.add(elem) - newSet - } - - override def -(elem: Expression): ExpressionSet = { - val newSet = clone() - newSet.remove(elem) - newSet - } - - def map(f: Expression => Expression): ExpressionSet = { - val newSet = new ExpressionSet() - this.iterator.foreach(elem => newSet.add(f(elem))) - newSet - } - - def flatMap(f: Expression => Iterable[Expression]): ExpressionSet = { - val newSet = new ExpressionSet() - this.iterator.foreach(f(_).foreach(newSet.add)) - newSet - } - - override def iterator: Iterator[Expression] = originals.iterator - - override def apply(elem: Expression): Boolean = this.contains(elem) - - override def equals(obj: Any): Boolean = obj match { - case other: ExpressionSet => this.baseSet == other.baseSet - case _ => false - } - - override def hashCode(): Int = baseSet.hashCode() - - override def clone(): ExpressionSet = new ExpressionSet(baseSet.clone(), originals.clone()) - - /** - * Returns a string containing both the post [[Canonicalize]] expressions and the original - * expressions in this set. 
- */ - def toDebugString: String = - s""" - |baseSet: ${baseSet.mkString(", ")} - |originals: ${originals.mkString(", ")} - """.stripMargin -} diff --git a/sql/catalyst/src/main/scala-2.13/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala similarity index 100% rename from sql/catalyst/src/main/scala-2.13/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala rename to sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/AttributeMap.scala diff --git a/sql/catalyst/src/main/scala-2.13/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala similarity index 100% rename from sql/catalyst/src/main/scala-2.13/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala rename to sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/ExpressionSet.scala diff --git a/sql/catalyst/src/test/scala-2.12/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala b/sql/catalyst/src/test/scala-2.12/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala deleted file mode 100644 index 7df96a8c64e8..000000000000 --- a/sql/catalyst/src/test/scala-2.12/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.catalyst.analysis - -import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.plans.logical._ -import org.apache.spark.sql.types._ - -/** - * Note: this test supports Scala 2.12. A parallel source tree has a 2.13 implementation. 
- */ -class ExtractGeneratorSuite extends AnalysisTest { - - test("SPARK-34141: ExtractGenerator with lazy project list") { - val b = AttributeReference("b", ArrayType(StringType))() - - val columns = AttributeReference("a", StringType)() :: b :: Nil - val explode = Alias(Explode(b), "c")() - - // view is a lazy seq - val rel = LocalRelation(output = columns.view) - val plan = Project(rel.output ++ (explode :: Nil), rel) - - assertAnalysisSuccess(plan) - } -} diff --git a/sql/catalyst/src/test/scala-2.13/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala similarity index 100% rename from sql/catalyst/src/test/scala-2.13/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala rename to sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ExtractGeneratorSuite.scala diff --git a/sql/core/pom.xml b/sql/core/pom.xml index bf3caf58fe27..ab7d1c07ee28 100644 --- a/sql/core/pom.xml +++ b/sql/core/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-sql_2.12 + spark-sql_2.13 jar Spark Project SQL https://spark.apache.org/ @@ -89,12 +89,12 @@ test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + org.apache.orc orc-core @@ -263,18 +263,6 @@ org.codehaus.mojo build-helper-maven-plugin - - add-sources - generate-sources - - add-source - - - - src/main/scala-${scala.binary.version} - - - add-scala-test-sources generate-test-sources diff --git a/sql/core/src/main/scala-2.12/org/apache/spark/sql/execution/streaming/StreamProgress.scala b/sql/core/src/main/scala-2.12/org/apache/spark/sql/execution/streaming/StreamProgress.scala deleted file mode 100644 index 9e5bb8e061cc..000000000000 --- a/sql/core/src/main/scala-2.12/org/apache/spark/sql/execution/streaming/StreamProgress.scala +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.execution.streaming - -import scala.collection.{immutable, GenTraversableOnce} - -import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, SparkDataStream} - -/** - * A helper class that looks like a Map[Source, Offset]. - */ -class StreamProgress( - val baseMap: immutable.Map[SparkDataStream, OffsetV2] = - new immutable.HashMap[SparkDataStream, OffsetV2]) - extends scala.collection.immutable.Map[SparkDataStream, OffsetV2] { - - // Note: this class supports Scala 2.12. A parallel source tree has a 2.13 implementation. 
- - def toOffsetSeq(source: Seq[SparkDataStream], metadata: OffsetSeqMetadata): OffsetSeq = { - OffsetSeq(source.map(get), Some(metadata)) - } - - override def toString: String = - baseMap.map { case (k, v) => s"$k: $v"}.mkString("{", ",", "}") - - override def +[B1 >: OffsetV2](kv: (SparkDataStream, B1)): Map[SparkDataStream, B1] = { - baseMap + kv - } - - override def get(key: SparkDataStream): Option[OffsetV2] = baseMap.get(key) - - override def iterator: Iterator[(SparkDataStream, OffsetV2)] = baseMap.iterator - - override def -(key: SparkDataStream): Map[SparkDataStream, OffsetV2] = baseMap - key - - def ++(updates: GenTraversableOnce[(SparkDataStream, OffsetV2)]): StreamProgress = { - new StreamProgress(baseMap ++ updates) - } -} diff --git a/sql/core/src/main/scala-2.13/org/apache/spark/sql/execution/streaming/StreamProgress.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala similarity index 100% rename from sql/core/src/main/scala-2.13/org/apache/spark/sql/execution/streaming/StreamProgress.scala rename to sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala diff --git a/sql/hive-thriftserver/pom.xml b/sql/hive-thriftserver/pom.xml index 76a1037f1cba..4d7b6696b0c2 100644 --- a/sql/hive-thriftserver/pom.xml +++ b/sql/hive-thriftserver/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-hive-thriftserver_2.12 + spark-hive-thriftserver_2.13 jar Spark Project Hive Thrift Server https://spark.apache.org/ @@ -61,12 +61,12 @@ test-jar test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + com.google.guava guava diff --git a/sql/hive/pom.xml b/sql/hive/pom.xml index 7a12c6222eb4..adcf0f55dcc1 100644 --- a/sql/hive/pom.xml +++ b/sql/hive/pom.xml @@ -21,12 +21,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../../pom.xml - spark-hive_2.12 + spark-hive_2.13 jar Spark Project Hive https://spark.apache.org/ @@ -79,12 +79,12 @@ test-jar test - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> + ${hive.group} hive-common diff --git a/streaming/pom.xml b/streaming/pom.xml index b36289e4e494..673f719f2cb9 100644 --- a/streaming/pom.xml +++ b/streaming/pom.xml @@ -20,12 +20,12 @@ 4.0.0 org.apache.spark - spark-parent_2.12 + spark-parent_2.13 4.0.0-SNAPSHOT ../pom.xml - spark-streaming_2.12 + spark-streaming_2.13 streaming @@ -50,12 +50,12 @@ org.apache.spark spark-tags_${scala.binary.version} - org.scala-lang.modules scala-parallel-collections_${scala.binary.version} - --> +
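Note on the warning flags: the parent pom now routes warning handling through Scala 2.13's -Wconf filters instead of the silencer compiler plugin that the patch removes. As a rough, sbt-style sketch of how those filters compose (not part of the Maven build above; the flag strings are copied from the pom hunk), the same categories and actions could be expressed as plain scalacOptions:

    // Illustrative build.sbt fragment; flag strings taken from the pom.xml hunk above.
    scalacOptions ++= Seq(
      "-Wunused:imports",                                           // successor to -Ywarn-unused:imports
      "-Wconf:cat=deprecation:wv,any:e",                            // deprecations stay (verbose) warnings, all other warnings become errors
      "-Wconf:cat=scaladoc:wv",                                     // scaladoc problems remain warnings
      "-Wconf:cat=unchecked&msg=outer reference:s",                 // silence the unchecked outer-reference pattern-match warning
      "-Wconf:msg=Implicit definition should have explicit type:s"  // silence a 2.13-only lint
    )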
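Note on CaseInsensitiveMap: with the 2.12 copy deleted, the former scala-2.13 source tree becomes the only implementation, and its behaviour is unchanged. A minimal usage sketch, assuming the spark-sql-api module is on the classpath, based on the API visible in the deleted file:

    import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap

    val opts = CaseInsensitiveMap(Map("Path" -> "/data", "Header" -> "true"))

    // Lookups ignore case ...
    assert(opts.get("path").contains("/data"))
    assert(opts("HEADER") == "true")

    // ... while the original keys stay available for case-sensitive consumers.
    assert(opts.toMap.keySet == Set("Path", "Header"))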
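Note on AttributeMap: it likewise collapses to a single source tree. The deleted scaladoc's point, that entries are keyed by expression id rather than by cosmetic attribute properties, can be illustrated with a small sketch (withName is used here only as one way to obtain a cosmetically different copy of the same attribute):

    import org.apache.spark.sql.catalyst.expressions.{AttributeMap, AttributeReference}
    import org.apache.spark.sql.types.IntegerType

    val a = AttributeReference("a", IntegerType)()
    val stats = AttributeMap(Seq(a -> "42 rows"))

    // A renamed copy keeps the same ExprId, so the lookup still succeeds.
    assert(stats.get(a.withName("A")).contains("42 rows"))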