diff --git a/core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java b/core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java
index 92bf0ecc1b5c..a1e29a8c873d 100644
--- a/core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java
+++ b/core/src/main/java/org/apache/spark/io/NioBufferedFileInputStream.java
@@ -51,7 +51,6 @@ public NioBufferedFileInputStream(File file) throws IOException {
   /**
    * Checks weather data is left to be read from the input stream.
    * @return true if data is left, false otherwise
-   * @throws IOException
    */
   private boolean refill() throws IOException {
     if (!byteBuffer.hasRemaining()) {
diff --git a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
index 4bfd2d358f36..7b68b399e6e8 100644
--- a/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
+++ b/core/src/main/java/org/apache/spark/memory/MemoryConsumer.java
@@ -78,7 +78,6 @@ public void spill() throws IOException {
    * @param size the amount of memory should be released
    * @param trigger the MemoryConsumer that trigger this spilling
    * @return the amount of released memory in bytes
-   * @throws IOException
    */
   public abstract long spill(long size, MemoryConsumer trigger) throws IOException;

diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
index 024756087bf7..833744f4777c 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
@@ -423,7 +423,6 @@ public void insertRecord(Object recordBase, long recordOffset, int length, int p
    *
    * @return metadata for the spill files written by this sorter. If no records were ever inserted
    *         into this sorter, then this will return an empty array.
-   * @throws IOException
    */
   public SpillInfo[] closeAndGetSpills() throws IOException {
     if (inMemSorter != null) {
diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
index 1b206c11d9a8..55e4e609c3c7 100644
--- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java
@@ -447,8 +447,6 @@ public void insertKVRecord(Object keyBase, long keyOffset, int keyLen,

   /**
    * Merges another UnsafeExternalSorters into this one, the other one will be emptied.
-   *
-   * @throws IOException
    */
   public void merge(UnsafeExternalSorter other) throws IOException {
     other.spill();
diff --git a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
index 8c63fa65b40f..fb2a67c2ab10 100644
--- a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationCache.scala
@@ -209,9 +209,8 @@ private[history] class ApplicationCache(

   /**
    * Register a filter for the web UI which checks for updates to the given app/attempt
-   * @param ui Spark UI to attach filters to
-   * @param appId application ID
-   * @param attemptId attempt ID
+   * @param key cache key, consisting of appId and attemptId
+   * @param loadedUI Spark UI to attach filters to
    */
   private def registerFilter(key: CacheKey, loadedUI: LoadedAppUI): Unit = {
     require(loadedUI != null)
@@ -231,7 +230,7 @@ private[history] class ApplicationCache(
 /**
  * An entry in the cache.
  *
- * @param ui Spark UI
+ * @param loadedUI Spark UI
  * @param completed Flag to indicated that the application has completed (and so
  *                  does not need refreshing).
  */
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala
index b680979a466a..4df2889089ee 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala
@@ -69,7 +69,6 @@ private[scheduler] class TaskSetBlacklist(

   /**
    * Get the most recent failure reason of this TaskSet.
-   * @return
    */
   def getLatestFailureReason: String = {
     latestFailureReason
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index 4b71dc1fff34..0dfadb657b77 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -853,7 +853,6 @@ private[spark] class BlockManager(
    * @param bufferTransformer this transformer expected to open the file if the block is backed by a
    *                          file by this it is guaranteed the whole content can be loaded
    * @tparam T result type
-   * @return
    */
   private[spark] def getRemoteBlock[T](
       blockId: BlockId,
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
index 06993712035f..f60b3b8db194 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
@@ -285,7 +285,6 @@ trait MesosSchedulerUtils extends Logging {
    * The attribute values are the mesos attribute types and they are
    *
    * @param offerAttributes the attributes offered
-   * @return
    */
   protected def toAttributeMap(offerAttributes: JList[Attribute])
     : Map[String, GeneratedMessageV3] = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExplainUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExplainUtils.scala
index 18a7f9822dcb..fc384fe117ca 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExplainUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExplainUtils.scala
@@ -41,7 +41,7 @@ object ExplainUtils {
    *
    * @param plan Input query plan to process
    * @param append function used to append the explain output
-   * @param startOperationID The start value of operation id. The subsequent operations will
+   * @param startOperatorID The start value of operation id. The subsequent operations will
    *                         be assigned higher value.
    *
    * @return The last generated operation id for this input plan. This is to ensure we
@@ -125,7 +125,7 @@ object ExplainUtils {
    *       appear in the explain output.
    *    2. operator identifier starts at startOperatorID + 1
    * @param plan Input query plan to process
-   * @param startOperationID The start value of operation id. The subsequent operations will
+   * @param startOperatorID The start value of operation id. The subsequent operations will
    *                         be assigned higher value.
    * @param operatorIDs A output parameter that contains a map of operator id and query plan. This
    *                    is used by caller to print the detail portion of the plan.
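
As a minimal, hypothetical illustration of the doc-comment convention these hunks apply (the class and method below are invented for this sketch and are not taken from the patch): a bare @throws or @return tag with no description only repeats what the method signature already states, so it is dropped, while the @param tags are kept in sync with the declared parameter names.

import java.io.IOException;

// Hypothetical example; names here are not from the Spark sources above.
public abstract class SpillableBuffer {

  /**
   * Spills buffered records to disk so the requested amount of memory can be freed.
   *
   * @param numBytes the number of bytes requested to be released
   * @return the number of bytes actually released
   */
  // Note: no description-free "@throws IOException" tag here; the throws clause below
  // already conveys that, which is exactly the redundancy the patch removes.
  public abstract long spill(long numBytes) throws IOException;
}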