diff --git a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js
index b7fbe0492b6d7..bf6af9b2cc922 100644
--- a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js
+++ b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js
@@ -126,7 +126,6 @@ function totalDurationAlpha(totalGCTime, totalDuration) {
     (Math.min(totalGCTime / totalDuration + 0.5, 1)) : 1;
 }
 
-// When GCTimePercent is edited change ToolTips.TASK_TIME to match
 var GCTimePercent = 0.1;
 
 function totalDurationStyle(totalGCTime, totalDuration) {
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
index 829631a04546e..909f5ea937cee 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
@@ -394,7 +394,6 @@ private[history] class ApplicationCacheCheckFilter(
     val httpRequest = request.asInstanceOf[HttpServletRequest]
     val httpResponse = response.asInstanceOf[HttpServletResponse]
     val requestURI = httpRequest.getRequestURI
-    val operation = httpRequest.getMethod
 
     // if the request is for an attempt, check to see if it is in need of delete/refresh
     // and have the cache update the UI if so
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 0d905b46953c0..cad107256c58c 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -404,9 +404,6 @@ private[spark] object HadoopRDD extends Logging {
    */
   val CONFIGURATION_INSTANTIATION_LOCK = new Object()
 
-  /** Update the input bytes read metric each time this number of records has been read */
-  val RECORDS_BETWEEN_BYTES_READ_METRIC_UPDATES = 256
-
   /**
    * The three methods below are helpers for accessing the local map, a property of the SparkEnv of
    * the local process.
diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
index d8119fb949847..9582bdbf52641 100644
--- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -590,11 +590,6 @@ private class ProxyRedirectHandler(_proxyUri: String) extends HandlerWrapper {
     override def sendRedirect(location: String): Unit = {
       val newTarget = if (location != null) {
         val target = new URI(location)
-        val path = if (target.getPath().startsWith("/")) {
-          target.getPath()
-        } else {
-          req.getRequestURI().stripSuffix("/") + "/" + target.getPath()
-        }
         // The target path should already be encoded, so don't re-encode it, just the
         // proxy address part.
         val proxyBase = UIUtils.uiRoot(req)
diff --git a/core/src/main/scala/org/apache/spark/ui/ToolTips.scala b/core/src/main/scala/org/apache/spark/ui/ToolTips.scala
index 587046676ff13..b80fba396b33a 100644
--- a/core/src/main/scala/org/apache/spark/ui/ToolTips.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ToolTips.scala
@@ -35,10 +35,6 @@ private[spark] object ToolTips {
 
   val OUTPUT = "Bytes written to Hadoop."
 
-  val STORAGE_MEMORY =
-    "Memory used / total available memory for storage of data " +
-    "like RDD partitions cached in memory. "
-
   val SHUFFLE_WRITE =
     "Bytes and records written to disk in order to be read by a shuffle in a future stage."
 
@@ -88,9 +84,6 @@ private[spark] object ToolTips {
        also create multiple RDDs internally. Cached RDDs are shown in green.
     """
 
-  val TASK_TIME =
-    "Shaded red when garbage collection (GC) time is over 10% of task time"
-
   val APPLICATION_EXECUTOR_LIMIT =
     """Maximum number of executors that this application will use. This limit is finite only
        when dynamic allocation is enabled. The number of granted executors may exceed the limit