Skip to content
Closed
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 10 additions & 6 deletions core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import javax.servlet.http.HttpServletRequest
import scala.xml.{Node, Unparsed}

import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1.{RDDDataDistribution, RDDPartitionInfo}
import org.apache.spark.status.api.v1.{ExecutorSummary, RDDDataDistribution, RDDPartitionInfo}
import org.apache.spark.ui._
import org.apache.spark.util.Utils

Expand Down Expand Up @@ -76,7 +76,8 @@ private[ui] class RDDPage(parent: SparkUITab, store: AppStatusStore) extends Web
rddStorageInfo.partitions.get,
blockPageSize,
blockSortColumn,
blockSortDesc)
blockSortDesc,
store.executorList(false))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this only list active executors?

Theoretically you shouldn't have any blocks stored on dead executors.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since we already fall back to the executorId when no address is found, we can restrict this to active executors.

_blockTable.table(page)
} catch {
case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
Expand Down Expand Up @@ -182,7 +183,8 @@ private[ui] class BlockDataSource(
rddPartitions: Seq[RDDPartitionInfo],
pageSize: Int,
sortColumn: String,
desc: Boolean) extends PagedDataSource[BlockTableRowData](pageSize) {
desc: Boolean,
executorIdToAddress: Map[String, String]) extends PagedDataSource[BlockTableRowData](pageSize) {

private val data = rddPartitions.map(blockRow).sorted(ordering(sortColumn, desc))

Expand All @@ -198,7 +200,7 @@ private[ui] class BlockDataSource(
rddPartition.storageLevel,
rddPartition.memoryUsed,
rddPartition.diskUsed,
rddPartition.executors.mkString(" "))
rddPartition.executors.map(id => executorIdToAddress.get(id).getOrElse(id)).mkString(" "))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

.map { id => ... }

Also in other places.

}

/**
Expand Down Expand Up @@ -226,7 +228,8 @@ private[ui] class BlockPagedTable(
rddPartitions: Seq[RDDPartitionInfo],
pageSize: Int,
sortColumn: String,
desc: Boolean) extends PagedTable[BlockTableRowData] {
desc: Boolean,
executorSummaries: Seq[ExecutorSummary]) extends PagedTable[BlockTableRowData] {

override def tableId: String = "rdd-storage-by-block-table"

Expand All @@ -243,7 +246,8 @@ private[ui] class BlockPagedTable(
rddPartitions,
pageSize,
sortColumn,
desc)
desc,
executorSummaries.map(ex => (ex.id, ex.hostPort)).toMap)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would probably be a good idea to keep the map sorted by executor id (e.g., by building a `SortedMap`/`TreeMap` instead of a plain `Map`).


override def pageLink(page: Int): String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
Expand Down