@@ -38,9 +38,9 @@ import org.apache.spark.{Partition => SparkPartition, _}
 
 
 private[spark] class SqlNewHadoopPartition(
-  rddId: Int,
-  val index: Int,
-  rawSplit: InputSplit with Writable)
+    rddId: Int,
+    val index: Int,
+    rawSplit: InputSplit with Writable)
   extends SparkPartition {
 
   val serializableHadoopSplit = new SerializableWritable(rawSplit)
@@ -62,13 +62,13 @@ private[spark] class SqlNewHadoopPartition(
  * changes based on [[org.apache.spark.rdd.HadoopRDD]].
  */
 private[spark] class SqlNewHadoopRDD[V: ClassTag](
-  sqlContext: SQLContext,
-  broadcastedConf: Broadcast[SerializableConfiguration],
-  @transient private val initDriverSideJobFuncOpt: Option[Job => Unit],
-  initLocalJobFuncOpt: Option[Job => Unit],
-  inputFormatClass: Class[_ <: InputFormat[Void, V]],
-  valueClass: Class[V])
-    extends RDD[V](sqlContext.sparkContext, Nil)
+    sqlContext: SQLContext,
+    broadcastedConf: Broadcast[SerializableConfiguration],
+    @transient private val initDriverSideJobFuncOpt: Option[Job => Unit],
+    initLocalJobFuncOpt: Option[Job => Unit],
+    inputFormatClass: Class[_ <: InputFormat[Void, V]],
+    valueClass: Class[V])
+  extends RDD[V](sqlContext.sparkContext, Nil)
   with SparkHadoopMapReduceUtil
   with Logging {
 
@@ -281,9 +281,9 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
    * the given function rather than the index of the partition.
    */
   private[spark] class NewHadoopMapPartitionsWithSplitRDD[U: ClassTag, T: ClassTag](
-    prev: RDD[T],
-    f: (InputSplit, Iterator[T]) => Iterator[U],
-    preservesPartitioning: Boolean = false)
+      prev: RDD[T],
+      f: (InputSplit, Iterator[T]) => Iterator[U],
+      preservesPartitioning: Boolean = false)
     extends RDD[U](prev) {
 
     override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None
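
The three hunks above touch only the leading whitespace of multi-line class declarations; the removed and added lines are otherwise identical. Assuming the intent is the usual Spark Scala layout (four-space indent for declaration parameters, two-space indent for the extends clause and the class body), the following is a minimal self-contained sketch of that layout; the class and member names are hypothetical and not part of the patch.

// Hypothetical example, not from the patch: illustrates the declaration layout
// the added (+) lines appear to follow under the assumption stated above.
class ExamplePartition(
    rddId: Int,            // declaration parameters: 4-space indent
    val index: Int,
    name: String)
  extends Serializable {   // extends clause: 2-space indent

  // class body members: 2-space indent
  val label: String = s"$name-$rddId-$index"
}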