diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 47692ec982890..f84a1978de1ad 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -67,7 +67,7 @@ public static void main(String[] args) throws Exception {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));
 
     // Create an input stream with the custom receiver on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     JavaReceiverInputDStream<String> lines = ssc.receiverStream(
       new JavaCustomReceiver(args[0], Integer.parseInt(args[1])));
     JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b217672def88e..d56134bd99e36 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -57,7 +57,7 @@ public static void main(String[] args) throws Exception {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 
     // Create a JavaReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
    // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index 45a876decff8b..c3543237fc7a8 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -126,7 +126,7 @@ private static JavaStreamingContext createContext(String ip,
     ssc.checkpoint(checkpointDirectory);
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
     JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
     JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
index 948d1a2111780..5d30698c93372 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaSqlNetworkWordCount.java
@@ -59,7 +59,7 @@ public static void main(String[] args) throws Exception {
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
 
     // Create a JavaReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     JavaReceiverInputDStream<String> lines = ssc.socketTextStream(
diff --git a/examples/src/main/python/ml/train_validation_split.py b/examples/src/main/python/ml/train_validation_split.py
index d4f9184bf576e..5e3dc7b3ec2fa 100644
--- a/examples/src/main/python/ml/train_validation_split.py
+++ b/examples/src/main/python/ml/train_validation_split.py
@@ -17,7 +17,7 @@
 
 """
 This example demonstrates applying TrainValidationSplit to split data
-and preform model selection.
+and perform model selection.
 Run with:
 
   bin/spark-submit examples/src/main/python/ml/train_validation_split.py
diff --git a/examples/src/main/python/streaming/recoverable_network_wordcount.py b/examples/src/main/python/streaming/recoverable_network_wordcount.py
index 60167dc772544..0ac7d4855b7a8 100644
--- a/examples/src/main/python/streaming/recoverable_network_wordcount.py
+++ b/examples/src/main/python/streaming/recoverable_network_wordcount.py
@@ -68,7 +68,7 @@ def createContext(host, port, outputPath):
     ssc = StreamingContext(sc, 1)
 
     # Create a socket stream on target ip:port and count the
-    # words in input stream of \n delimited text (eg. generated by 'nc')
+    # words in input stream of \n delimited text (e.g. generated by 'nc')
     lines = ssc.socketTextStream(host, port)
     words = lines.flatMap(lambda line: line.split(" "))
     wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index ab3cfc067994d..22fe26073a690 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -54,7 +54,7 @@ def getSparkSessionInstance(sparkConf):
     ssc = StreamingContext(sc, 1)
 
     # Create a socket stream on target ip:port and count the
-    # words in input stream of \n delimited text (eg. generated by 'nc')
+    # words in input stream of \n delimited text (e.g. generated by 'nc')
     lines = ssc.socketTextStream(host, int(port))
     words = lines.flatMap(lambda line: line.split(" "))
 
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
index fc3f8fa53c7ae..357f019d380f2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala
@@ -50,7 +50,7 @@ object CustomReceiver {
     val ssc = new StreamingContext(sparkConf, Seconds(1))
 
     // Create an input stream with the custom receiver on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     val lines = ssc.receiverStream(new CustomReceiver(args(0), args(1).toInt))
     val words = lines.flatMap(_.split(" "))
     val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 15b57fccb4076..b8cb1bd10c668 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -47,7 +47,7 @@ object NetworkWordCount {
     val ssc = new StreamingContext(sparkConf, Seconds(1))
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index f018f3a26d2e9..f7fe9b0142afd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -112,7 +112,7 @@ object RecoverableNetworkWordCount {
     ssc.checkpoint(checkpointDirectory)
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     val lines = ssc.socketTextStream(ip, port)
     val words = lines.flatMap(_.split(" "))
     val wordCounts = words.map((_, 1)).reduceByKey(_ + _)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 787bbec73b28f..a4a78525da1f5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -51,7 +51,7 @@ object SqlNetworkWordCount {
     val ssc = new StreamingContext(sparkConf, Seconds(2))
 
     // Create a socket stream on target ip:port and count the
-    // words in input stream of \n delimited text (eg. generated by 'nc')
+    // words in input stream of \n delimited text (e.g. generated by 'nc')
     // Note that no duplication in storage level only for running locally.
     // Replication necessary in distributed scenario for fault tolerance.
     val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index 2811e67009fb0..40f7441373e94 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -52,7 +52,7 @@ object StatefulNetworkWordCount {
     val initialRDD = ssc.sparkContext.parallelize(List(("hello", 1), ("world", 1)))
 
     // Create a ReceiverInputDStream on target ip:port and count the
-    // words in input stream of \n delimited test (eg. generated by 'nc')
+    // words in input stream of \n delimited test (e.g. generated by 'nc')
     val lines = ssc.socketTextStream(args(0), args(1).toInt)
     val words = lines.flatMap(_.split(" "))
     val wordDstream = words.map(x => (x, 1))
diff --git a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
index 609696bc8a2c7..3b01f219565ef 100644
--- a/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
+++ b/external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/DockerJDBCIntegrationSuite.scala
@@ -46,7 +46,7 @@ abstract class DatabaseOnDocker {
   val env: Map[String, String]
 
   /**
-   * Wheather or not to use ipc mode for shared memory when starting docker image
+   * Whether or not to use ipc mode for shared memory when starting docker image
   */
   val usesIpc: Boolean
 
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
index 649cb725d7d17..4b1be504e9685 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaContinuousSourceSuite.scala
@@ -33,7 +33,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
        val df = spark
          .readStream
          .format("kafka")
@@ -99,7 +99,7 @@ class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuo
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
        val df = spark
          .readStream
          .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
index 36da2f13a6159..fd61f3d33a5ac 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala
@@ -544,7 +544,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
       val rows = spark.table("kafkaWatermark").collect()
       assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
       val row = rows(0)
-      // We cannot check the exact window start time as it depands on the time that messages were
+      // We cannot check the exact window start time as it depends on the time that messages were
      // inserted by the producer. So here we just use a low bound to make sure the internal
       // conversion works.
       assert(
@@ -740,7 +740,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 
     val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
      testStream(mapped)(
        StartStream(ProcessingTime(100), clock),
        waitUntilBatchProcessed,
@@ -863,7 +863,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
 
     val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
      testStream(mapped)(
        StartStream(ProcessingTime(100), clock),
        waitUntilBatchProcessed,
@@ -954,7 +954,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
       .load()
       .select($"value".as[String])
 
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       producer.beginTransaction()
       (0 to 3).foreach { i =>
         producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -970,7 +970,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
     // this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
     // `FetchedData._offsetAfterPoll` (See SPARK-25495), the next batch will see incorrect
     // values and return wrong results hence fail the test.
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       producer.beginTransaction()
       (4 to 7).foreach { i =>
         producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
@@ -1472,7 +1472,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
     withTable(table) {
       val topic = newTopic()
       testUtils.createTopic(topic)
-      testUtils.withTranscationalProducer { producer =>
+      testUtils.withTransactionalProducer { producer =>
        val df = spark
          .readStream
          .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index eb186970fc25d..4dc652bdf06aa 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -239,7 +239,7 @@ class KafkaRelationSuite extends QueryTest with SharedSQLContext with KafkaTest
   test("read Kafka transactional messages: read_committed") {
     val topic = newTopic()
     testUtils.createTopic(topic)
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       val df = spark
         .read
         .format("kafka")
@@ -288,7 +288,7 @@ class KafkaRelationSuite extends QueryTest with SharedSQLContext with KafkaTest
   test("read Kafka transactional messages: read_uncommitted") {
     val topic = newTopic()
     testUtils.createTopic(topic)
-    testUtils.withTranscationalProducer { producer =>
+    testUtils.withTransactionalProducer { producer =>
       val df = spark
         .read
         .format("kafka")
diff --git a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
index bf6934be52705..c67188a0a0418 100644
--- a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
+++ b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaTestUtils.scala
@@ -348,7 +348,7 @@ class KafkaTestUtils(withBrokerProps: Map[String, Object] = Map.empty) extends L
   }
 
   /** Call `f` with a `KafkaProducer` that has initialized transactions. */
-  def withTranscationalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
+  def withTransactionalProducer(f: KafkaProducer[String, String] => Unit): Unit = {
     val props = producerConfiguration
     props.put("transactional.id", UUID.randomUUID().toString)
     val producer = new KafkaProducer[String, String](props)
@@ -390,7 +390,7 @@ class KafkaTestUtils(withBrokerProps: Map[String, Object] = Map.empty) extends L
     // ensure that logs from all replicas are deleted if delete topic is marked successful
     assert(servers.forall(server => topicAndPartitions.forall(tp =>
       server.getLogManager().getLog(tp).isEmpty)),
-      s"topic $topic still exists in log mananger")
+      s"topic $topic still exists in log manager")
     // ensure that topic is removed from all cleaner offsets
     assert(servers.forall(server => topicAndPartitions.forall { tp =>
       val checkpoints = server.getLogManager().liveLogDirs.map { logDir =>
diff --git a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index 5a21253a0174d..6f58288161cc2 100644
--- a/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/external/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -48,7 +48,7 @@
  *
  * Usage: JavaKinesisWordCountASL [app-name] [stream-name] [endpoint-url] [region-name]
  *   [app-name] is the name of the consumer app, used to track the read data in DynamoDB
- *   [stream-name] name of the Kinesis stream (ie. mySparkStream)
+ *   [stream-name] name of the Kinesis stream (i.e. mySparkStream)
  *   [endpoint-url] endpoint of the Kinesis service
  *                  (e.g. https://kinesis.us-east-1.amazonaws.com)
  *
diff --git a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
index 5370b793897bb..66c9ed80f3b33 100644
--- a/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
+++ b/external/kinesis-asl/src/main/python/examples/streaming/kinesis_wordcount_asl.py
@@ -23,7 +23,7 @@
 
   Usage: kinesis_wordcount_asl.py <app-name> <stream-name> <endpoint-url> <region-name>
     <app-name> is the name of the consumer app, used to track the read data in DynamoDB
-    <stream-name> name of the Kinesis stream (ie. mySparkStream)
+    <stream-name> name of the Kinesis stream (i.e. mySparkStream)
     <endpoint-url> endpoint of the Kinesis service
       (e.g. https://kinesis.us-east-1.amazonaws.com)
     <region-name> region name of the Kinesis endpoint (e.g. us-east-1)
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
index c78737d1d7fe0..755d5e9a25005 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala
@@ -43,7 +43,7 @@ import org.apache.spark.streaming.kinesis.KinesisInputDStream
 *
 * Usage: KinesisWordCountASL <app-name> <stream-name> <endpoint-url> <region-name>
 *   <app-name> is the name of the consumer app, used to track the read data in DynamoDB
- *   <stream-name> name of the Kinesis stream (ie. mySparkStream)
+ *   <stream-name> name of the Kinesis stream (i.e. mySparkStream)
 *   <endpoint-url> endpoint of the Kinesis service
 *                  (e.g. https://kinesis.us-east-1.amazonaws.com)
 *
@@ -167,9 +167,9 @@ object KinesisWordCountASL extends Logging {
 *   Usage: KinesisWordProducerASL <stream-name> <endpoint-url> \
 *     <records-per-sec> <words-per-record>
 *
- *   <stream-name> is the name of the Kinesis stream (ie. mySparkStream)
+ *   <stream-name> is the name of the Kinesis stream (i.e. mySparkStream)
 *   <endpoint-url> is the endpoint of the Kinesis service
- *     (ie. https://kinesis.us-east-1.amazonaws.com)
+ *     (i.e. https://kinesis.us-east-1.amazonaws.com)
 *   <records-per-sec> is the rate of records per second to put onto the stream
 *   <words-per-record> is the number of words per record
 *
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
index c60b9896a3473..81270607e01fa 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala
@@ -597,7 +597,7 @@ private class KinesisUtilsPythonHelper {
     // scalastyle:on
     if (!(stsAssumeRoleArn != null && stsSessionName != null && stsExternalId != null)
         && !(stsAssumeRoleArn == null && stsSessionName == null && stsExternalId == null)) {
-      throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExtenalId " +
+      throw new IllegalArgumentException("stsAssumeRoleArn, stsSessionName, and stsExternalId " +
         "must all be defined or all be null")
     }
 
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
index 1e4c6c74bd184..364c6f10191ed 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala
@@ -205,8 +205,8 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
     withSpark { sc =>
       // Check that implementation can handle large vertexIds, SPARK-25149
       val vertexIdOffset = Int.MaxValue.toLong + 1
-      val sourceOffest = 4
-      val source = vertexIdOffset + sourceOffest
+      val sourceOffset = 4
+      val source = vertexIdOffset + sourceOffset
       val numIter = 10
       val vertices = vertexIdOffset until vertexIdOffset + numIter
       val chain1 = vertices.zip(vertices.tail)
@@ -216,7 +216,7 @@ class PageRankSuite extends SparkFunSuite with LocalSparkContext {
       val tol = 0.0001
       val errorTol = 1.0e-1
 
-      val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffest))
+      val a = resetProb / (1 - Math.pow(1 - resetProb, numIter - sourceOffset))
       // We expect the rank to decay as (1 - resetProb) ^ distance
       val expectedRanks = sc.parallelize(vertices).map { vid =>
         val rank = if (vid < source) {