Commit aefae0f ("Address comments")
1 parent c18a9b6

5 files changed: 7 additions & 7 deletions

core/src/main/scala/org/apache/spark/rpc/RpcEndpoint.scala

Lines changed: 2 additions & 2 deletions
@@ -33,9 +33,9 @@ private[spark] trait RpcEnvFactory {
  *
  * It is guaranteed that `onStart`, `receive` and `onStop` will be called in sequence.
  *
- * The life-cycle of an endpoint is as below in an order:
+ * The life-cycle of an endpoint is:
  *
- * constructor, onStart, receive* and onStop
+ * {@code constructor -> onStart -> receive* -> onStop}
  *
  * Note: `receive` can be called concurrently. If you want `receive` to be thread-safe, please use
  * [[ThreadSafeRpcEndpoint]]
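To make the documented ordering concrete, here is a minimal sketch of an endpoint (the EchoEndpoint name is hypothetical, and since RpcEndpoint is private[spark] a real implementation must live under the org.apache.spark package tree):

import org.apache.spark.rpc.{RpcEndpoint, RpcEnv}

// Hypothetical endpoint walking through the documented life-cycle:
// constructor -> onStart -> receive* -> onStop.
class EchoEndpoint(override val rpcEnv: RpcEnv) extends RpcEndpoint {

  // 1. The constructor runs first, before the endpoint is registered.

  // 2. onStart is invoked once, before any message is delivered.
  override def onStart(): Unit = println("endpoint started")

  // 3. receive may then fire any number of times, possibly concurrently;
  //    mix in ThreadSafeRpcEndpoint for serialized delivery.
  override def receive: PartialFunction[Any, Unit] = {
    case msg: String => println(s"echo: $msg")
  }

  // 4. onStop is invoked once, after the last message.
  override def onStop(): Unit = println("endpoint stopped")
}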

core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala

Lines changed: 2 additions & 2 deletions
@@ -708,8 +708,8 @@ private[spark] object TaskSchedulerImpl {
  * offers are ordered such that we'll allocate one container on each host before allocating a
  * second container on any host, and so on, in order to reduce the damage if a host fails.
  *
- * For example, given a map consisting of h1 to [o1, o2, o3], h2 to [o4] and h3 to [o5, o6],
- * returns a list, [o1, o5, o4, o2, o6, o3].
+ * For example, given {@literal <h1, [o1, o2, o3]>}, {@literal <h2, [o4]>} and
+ * {@literal <h3, [o5, o6]>}, returns {@literal [o1, o5, o4, o2, o6, o3]}.
  */
 def prioritizeContainers[K, T] (map: HashMap[K, ArrayBuffer[T]]): List[T] = {
   val _keyList = new ArrayBuffer[K](map.size)
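The interleaving described in the new example can be reproduced with a small standalone sketch (not Spark's implementation, which drains the buffers in place): take the first offer from every host, then the second from every host that still has one, and so on.

import scala.collection.mutable.{ArrayBuffer, HashMap}

def roundRobin[K, T](map: HashMap[K, ArrayBuffer[T]]): List[T] = {
  val keys = map.keys.toList
  val rounds = if (map.isEmpty) 0 else map.values.map(_.length).max
  // Round i picks the i-th offer of each host that still has one.
  (0 until rounds).flatMap(i => keys.flatMap(k => map(k).lift(i))).toList
}

val offers = HashMap(
  "h1" -> ArrayBuffer("o1", "o2", "o3"),
  "h2" -> ArrayBuffer("o4"),
  "h3" -> ArrayBuffer("o5", "o6"))
// Prints one offer per host per round, e.g. List(o1, o5, o4, o2, o6, o3);
// the order within a round depends on the map's iteration order.
println(roundRobin(offers))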

mllib/src/main/scala/org/apache/spark/ml/classification/Classifier.scala

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ abstract class Classifier[
  * and features (`Vector`).
  * @param numClasses Number of classes label can take. Labels must be integers in the range
  *                   [0, numClasses).
- * @note Throws `SparkException` if any label is not an integer is greater than or equal to 0
+ * @note Throws `SparkException` if any label is a non-integer or is negative
  */
 protected def extractLabeledPoints(dataset: Dataset[_], numClasses: Int): RDD[LabeledPoint] = {
   require(numClasses > 0, s"Classifier (in extractLabeledPoints) found numClasses =" +
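The corrected @note boils down to a per-label rule; here is a hypothetical standalone check (the real method raises SparkException while materializing the RDD, not IllegalArgumentException):

// Accept a label iff it is a whole number in the range [0, numClasses).
def validateLabel(label: Double, numClasses: Int): Unit = {
  require(label == label.toInt && label >= 0 && label < numClasses,
    s"Label $label must be an integer in the range [0, $numClasses).")
}

validateLabel(2.0, numClasses = 3)     // passes
// validateLabel(-1.0, numClasses = 3) // rejected: negative
// validateLabel(1.5, numClasses = 3)  // rejected: non-integer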

resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala

Lines changed: 1 addition & 1 deletion
@@ -296,7 +296,7 @@ trait MesosSchedulerUtils extends Logging {
 
 /**
  * Parses the attributes constraints provided to spark and build a matching data struct:
- *  {@code Map[<attribute-name>, Set[values-to-match]}
+ *  {@literal Map[<attribute-name>, Set[values-to-match]}
  * The constraints are specified as ';' separated key-value pairs where keys and values
  * are separated by ':'. The ':' implies equality (for singular values) and "is one of" for
  * multiple values (comma separated). For example:
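A simplified sketch of the described format (not Spark's parser; it assumes every pair carries a value list and ignores the value-less "attribute is present" form):

// Parse "k1:v1;k2:v2a,v2b" into Map(k1 -> Set(v1), k2 -> Set(v2a, v2b)).
def parseConstraints(spec: String): Map[String, Set[String]] =
  spec.split(";").filter(_.nonEmpty).map { pair =>
    val Array(key, values) = pair.split(":", 2)
    key -> values.split(",").toSet
  }.toMap

// Map(os -> Set(centos7), zone -> Set(us-east-1a, us-east-1b))
println(parseConstraints("os:centos7;zone:us-east-1a,us-east-1b"))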

sql/core/src/main/scala/org/apache/spark/sql/catalog/Catalog.scala

Lines changed: 1 addition & 1 deletion
@@ -510,7 +510,7 @@ abstract class Catalog {
 def refreshTable(tableName: String): Unit
 
 /**
- * Invalidates and refreshes all the cached data (and the associated metadata) for any [[Dataset]]
+ * Invalidates and refreshes all the cached data (and the associated metadata) for any `Dataset`
  * that contains the given data source path. Path matching is by prefix, i.e. "/" would invalidate
  * everything that is cached.
  *
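In the Spark source this Scaladoc sits on Catalog.refreshByPath, so usage looks like the following sketch (the path and app name are made up):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("refresh-by-path-demo").getOrCreate()

// Any cached Dataset whose data source path starts with this prefix is
// invalidated; it will be re-cached lazily on its next materialization.
spark.catalog.refreshByPath("/data/events")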
