[SPARK-22087][SPARK-14650][WIP][BUILD][REPL][CORE] Compile Spark REPL for Scala 2.12 + other 2.12 fixes #19307
@@ -450,10 +450,9 @@ private[deploy] class Worker(
           }
         }(cleanupThreadExecutor)

-        cleanupFuture.onFailure {
-          case e: Throwable =>
-            logError("App dir cleanup failed: " + e.getMessage, e)
-        }(cleanupThreadExecutor)
+        cleanupFuture.failed.foreach(e =>
+          logError("App dir cleanup failed: " + e.getMessage, e)
+        )(cleanupThreadExecutor)

     case MasterChanged(masterRef, masterWebUiUrl) =>
       logInfo("Master has changed, new master is at " + masterRef.address.toSparkURL)

@@ -622,10 +621,9 @@ private[deploy] class Worker(
           dirList.foreach { dir =>
             Utils.deleteRecursively(new File(dir))
           }
-        }(cleanupThreadExecutor).onFailure {
-          case e: Throwable =>
-            logError(s"Clean up app dir $dirList failed: ${e.getMessage}", e)
-        }(cleanupThreadExecutor)
+        }(cleanupThreadExecutor).failed.foreach(e =>
+          logError(s"Clean up app dir $dirList failed: ${e.getMessage}", e)
+        )(cleanupThreadExecutor)
       }
       shuffleService.applicationRemoved(id)
     }
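For context, `Future.onFailure` is deprecated in Scala 2.12 in favour of registering a callback on `future.failed`. Below is a minimal standalone sketch of the same migration; the future and its failure are made up, and the sleep only exists to let the callback fire in this toy program:

```scala
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object FailureCallbackExample {
  def main(args: Array[String]): Unit = {
    val cleanupFuture: Future[Unit] = Future { throw new RuntimeException("disk full") }

    // Scala 2.11 style, deprecated in 2.12:
    //   cleanupFuture.onFailure { case e: Throwable => println("cleanup failed: " + e.getMessage) }

    // 2.12-friendly replacement: project the failure and attach the callback to it.
    cleanupFuture.failed.foreach(e => println("cleanup failed: " + e.getMessage))

    // Give the daemon-thread callback a moment to run before the JVM exits.
    Thread.sleep(500)
  }
}
```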
@@ -128,9 +128,9 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
     }
     // Compute the minimum and the maximum
     val (max: Double, min: Double) = self.mapPartitions { items =>
-      Iterator(items.foldRight(Double.NegativeInfinity,
-        Double.PositiveInfinity)((e: Double, x: (Double, Double)) =>
-        (x._1.max(e), x._2.min(e))))
+      Iterator(
+        items.foldRight((Double.NegativeInfinity, Double.PositiveInfinity)
+        )((e: Double, x: (Double, Double)) => (x._1.max(e), x._2.min(e))))
     }.reduce { (maxmin1, maxmin2) =>
       (maxmin1._1.max(maxmin2._1), maxmin1._2.min(maxmin2._2))
     }
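The rewritten call passes the fold's seed as an explicit pair instead of relying on the compiler adapting two separate arguments into a tuple ("auto-tupling"), which the compiler can warn about. A minimal standalone sketch of the explicit-tuple form, with made-up sample data:

```scala
object FoldRightTupleExample {
  def main(args: Array[String]): Unit = {
    val items = Iterator(3.0, 1.0, 4.0, 1.5)

    // Seed the fold with an explicit (max, min) pair rather than letting the
    // compiler adapt two arguments into the expected tuple.
    val (max, min) = items.foldRight((Double.NegativeInfinity, Double.PositiveInfinity)) {
      (e: Double, x: (Double, Double)) => (x._1.max(e), x._2.min(e))
    }

    println(s"max = $max, min = $min") // max = 4.0, min = 1.5
  }
}
```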
@@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicInteger

 import scala.annotation.tailrec
 import scala.collection.Map
-import scala.collection.mutable.{HashMap, HashSet, Stack}
+import scala.collection.mutable.{ArrayStack, HashMap, HashSet}
 import scala.concurrent.duration._
 import scala.language.existentials
 import scala.language.postfixOps

@@ -396,12 +396,12 @@ class DAGScheduler(

   /** Find ancestor shuffle dependencies that are not registered in shuffleToMapStage yet */
   private def getMissingAncestorShuffleDependencies(
-      rdd: RDD[_]): Stack[ShuffleDependency[_, _, _]] = {
-    val ancestors = new Stack[ShuffleDependency[_, _, _]]
+      rdd: RDD[_]): ArrayStack[ShuffleDependency[_, _, _]] = {
+    val ancestors = new ArrayStack[ShuffleDependency[_, _, _]]
     val visited = new HashSet[RDD[_]]
     // We are manually maintaining a stack here to prevent StackOverflowError
     // caused by recursively visiting
-    val waitingForVisit = new Stack[RDD[_]]
+    val waitingForVisit = new ArrayStack[RDD[_]]
     waitingForVisit.push(rdd)
     while (waitingForVisit.nonEmpty) {
       val toVisit = waitingForVisit.pop()

@@ -434,7 +434,7 @@ class DAGScheduler(
       rdd: RDD[_]): HashSet[ShuffleDependency[_, _, _]] = {
     val parents = new HashSet[ShuffleDependency[_, _, _]]
     val visited = new HashSet[RDD[_]]
-    val waitingForVisit = new Stack[RDD[_]]
+    val waitingForVisit = new ArrayStack[RDD[_]]
     waitingForVisit.push(rdd)
     while (waitingForVisit.nonEmpty) {
       val toVisit = waitingForVisit.pop()

@@ -456,7 +456,7 @@ class DAGScheduler(
     val visited = new HashSet[RDD[_]]
     // We are manually maintaining a stack here to prevent StackOverflowError
     // caused by recursively visiting
-    val waitingForVisit = new Stack[RDD[_]]
+    val waitingForVisit = new ArrayStack[RDD[_]]
     def visit(rdd: RDD[_]) {
       if (!visited(rdd)) {
         visited += rdd

@@ -1633,7 +1633,7 @@ class DAGScheduler(
     val visitedRdds = new HashSet[RDD[_]]
     // We are manually maintaining a stack here to prevent StackOverflowError
     // caused by recursively visiting
-    val waitingForVisit = new Stack[RDD[_]]
+    val waitingForVisit = new ArrayStack[RDD[_]]
     def visit(rdd: RDD[_]) {
       if (!visitedRdds(rdd)) {
         visitedRdds += rdd
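`scala.collection.mutable.Stack` is deprecated as of Scala 2.12, and `ArrayStack` offers the same `push`/`pop`/`nonEmpty` surface used here. A minimal standalone sketch of the iterative-traversal pattern with `ArrayStack`; the `Node` type is invented for illustration and is not Spark's RDD lineage:

```scala
import scala.collection.mutable.{ArrayStack, HashSet}

object IterativeTraversalExample {
  // A tiny stand-in for a lineage graph: each node only knows its parents.
  final case class Node(id: Int, parents: Seq[Node] = Nil)

  // Iterative DFS with an explicit stack, mirroring the pattern that avoids
  // StackOverflowError from deep recursion.
  def reachable(root: Node): Set[Int] = {
    val visited = new HashSet[Int]
    val waitingForVisit = new ArrayStack[Node]
    waitingForVisit.push(root)
    while (waitingForVisit.nonEmpty) {
      val toVisit = waitingForVisit.pop()
      if (visited.add(toVisit.id)) {
        toVisit.parents.foreach(waitingForVisit.push)
      }
    }
    visited.toSet
  }

  def main(args: Array[String]): Unit = {
    val a = Node(1); val b = Node(2, Seq(a)); val c = Node(3, Seq(a, b))
    println(reachable(c)) // Set(1, 2, 3)
  }
}
```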
@@ -501,8 +501,8 @@ private class JavaIterableWrapperSerializer
 private object JavaIterableWrapperSerializer extends Logging {
   // The class returned by JavaConverters.asJava
   // (scala.collection.convert.Wrappers$IterableWrapper).
-  val wrapperClass =
-    scala.collection.convert.WrapAsJava.asJavaIterable(Seq(1)).getClass
+  import scala.collection.JavaConverters._
+  val wrapperClass = Seq(1).asJava.getClass

   // Get the underlying method so we can use it to get the Scala collection for serialization.
   private val underlyingMethodOpt = {
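The `WrapAsJava` helpers are deprecated in Scala 2.12 in favour of `scala.collection.JavaConverters` and its `asJava` extension methods. A minimal standalone sketch of the replacement, not the Spark serializer itself:

```scala
import scala.collection.JavaConverters._

object WrapperClassExample {
  def main(args: Array[String]): Unit = {
    // asJava wraps the Scala Seq in a Java-collection adapter; getClass then
    // captures the runtime class of that wrapper.
    val asJavaList: java.util.List[Int] = Seq(1, 2, 3).asJava
    println(asJavaList.getClass.getName)
  }
}
```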
@@ -83,11 +83,12 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll {
     val resolver = settings.getDefaultResolver.asInstanceOf[ChainResolver]
     assert(resolver.getResolvers.size() === 4)
     val expected = repos.split(",").map(r => s"$r/")
-    resolver.getResolvers.toArray.zipWithIndex.foreach { case (resolver: AbstractResolver, i) =>
-      if (1 < i && i < 3) {
-        assert(resolver.getName === s"repo-$i")
-        assert(resolver.asInstanceOf[IBiblioResolver].getRoot === expected(i - 1))
-      }
+    resolver.getResolvers.toArray.map(_.asInstanceOf[AbstractResolver]).zipWithIndex.foreach {
+      case (r, i) =>
+        if (1 < i && i < 3) {
+          assert(r.getName === s"repo-$i")
+          assert(r.asInstanceOf[IBiblioResolver].getRoot === expected(i - 1))
+        }
     }
   }
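The test now casts the resolvers up front and matches on a plain tuple rather than using a type pattern (`case (resolver: AbstractResolver, i)`) inside the closure, which leaves the match non-exhaustive. A minimal standalone sketch of the same cast-before-match shape, using plain strings instead of the Ivy resolver classes:

```scala
object CastBeforeMatchExample {
  def main(args: Array[String]): Unit = {
    // An untyped Java-style collection, similar to what Ivy's ChainResolver returns.
    val raw = new java.util.ArrayList[Object]()
    raw.add("repo-1"); raw.add("repo-2")

    // Instead of `case (s: String, i) => ...` (a type pattern that makes the
    // match non-exhaustive), cast up front and match on a plain tuple.
    raw.toArray.map(_.asInstanceOf[String]).zipWithIndex.foreach {
      case (s, i) => println(s"$i -> $s")
    }
  }
}
```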
@@ -625,6 +625,8 @@ class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCor
           backend.taskFailed(taskDescription, fetchFailed)
         case (1, _, partition) =>
           backend.taskSuccess(taskDescription, 42 + partition)
+        case unmatched =>
+          fail(s"Unexpected shuffle output $unmatched")
       }
     }
     withBackend(runBackend _) {
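Adding a catch-all case keeps the match exhaustive, so unexpected inputs fail the test loudly instead of surfacing as a MatchError. A minimal standalone sketch of the pattern; the handler and its inputs are invented:

```scala
object ExhaustiveMatchExample {
  def handle(stageAttemptAndPartition: (Int, Int, Int)): String =
    stageAttemptAndPartition match {
      case (0, _, partition) => s"stage 0, partition $partition"
      case (1, _, partition) => s"stage 1, partition $partition"
      // Catch-all keeps the match exhaustive and turns surprises into a clear failure.
      case unmatched => sys.error(s"Unexpected input $unmatched")
    }

  def main(args: Array[String]): Unit =
    println(handle((1, 0, 7))) // stage 1, partition 7
}
```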
These changes avoid warnings about eta-expansion of zero-arg methods. They work fine in 2.11 as well; they just stop relying on that syntactic sugar.
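For reference, a minimal standalone sketch of the zero-arg eta-expansion issue and the explicit alternative; `withBackend` and `runBackend` here are hypothetical stand-ins, not the actual test harness:

```scala
object EtaExpansionExample {
  def runBackend(): Unit = println("running backend")

  // Hypothetical helper that takes the behaviour as a Function0.
  def withBackend(body: () => Unit): Unit = body()

  def main(args: Array[String]): Unit = {
    // Under Scala 2.12, relying on eta-expansion of a zero-argument method
    // (e.g. `withBackend(runBackend _)`) can trigger a deprecation warning;
    // an explicit function literal is warning-free on both 2.11 and 2.12.
    withBackend(() => runBackend())
  }
}
```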