Skip to content

Commit f0c9318

Browse files
committed
Fix more scalastyle warnings in yarn
1 parent: 80bf4c3 · commit: f0c9318

3 files changed

Lines changed: 11 additions & 7 deletions

File tree

yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,8 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
137137
System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV)
138138

139139
val params = "PROXY_HOST=" + parts(0) + "," + "PROXY_URI_BASE=" + uriBase
140-
System.setProperty("spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.params", params)
140+
System.setProperty(
141+
"spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.params", params)
141142
}
142143

143144
/** Get the Yarn approved local directories. */

yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,8 @@ class ExecutorLauncher(args: ApplicationMasterArguments, conf: Configuration, sp
6565
override def preStart() {
6666
logInfo("Listen to driver: " + driverUrl)
6767
driver = context.actorSelection(driverUrl)
68-
// Send a hello message thus the connection is actually established, thus we can monitor Lifecycle Events.
68+
// Send a hello message thus the connection is actually established,
69+
// thus we can monitor Lifecycle Events.
6970
driver ! "Hello"
7071
context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
7172
}
@@ -95,8 +96,9 @@ class ExecutorLauncher(args: ApplicationMasterArguments, conf: Configuration, sp
9596
// Allocate all containers
9697
allocateExecutors()
9798

98-
// Launch a progress reporter thread, else app will get killed after expiration (def: 10mins) timeout
99-
// ensure that progress is sent before YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS elapse.
99+
// Launch a progress reporter thread, else app will get killed after expiration
100+
// (def: 10mins) timeout ensure that progress is sent before
101+
// YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS elapse.
100102

101103
val timeoutInterval = yarnConf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 120000)
102104
// we want to be reasonably responsive without causing too many requests to RM.

yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -276,7 +276,8 @@ private[yarn] class YarnAllocationHandler(
276276
allocatedRackCount.put(rack, allocatedRackCount.getOrElse(rack, 0) + 1)
277277
}
278278
}
279-
logInfo("Launching ExecutorRunnable. driverUrl: %s, executorHostname: %s".format(driverUrl, executorHostname))
279+
logInfo("Launching ExecutorRunnable. driverUrl: %s, executorHostname: %s".format(
280+
driverUrl, executorHostname))
280281
val executorRunnable = new ExecutorRunnable(
281282
container,
282283
conf,
@@ -314,8 +315,8 @@ private[yarn] class YarnAllocationHandler(
314315
// `pendingReleaseContainers`.
315316
pendingReleaseContainers.remove(containerId)
316317
} else {
317-
// Decrement the number of executors running. The next iteration of the ApplicationMaster's
318-
// reporting thread will take care of allocating.
318+
// Decrement the number of executors running. The next iteration of
319+
// the ApplicationMaster's reporting thread will take care of allocating.
319320
numExecutorsRunning.decrementAndGet()
320321
logInfo("Completed container %s (state: %s, exit status: %s)".format(
321322
containerId,

0 commit comments

Comments (0)