
Commit b7b319c

Bug fixes with InputFileName expression.

Parent: bab0c85

2 files changed: +14 -14 lines

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/InputFileName.scala

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ case class InputFileName() extends LeafExpression with Nondeterministic {
   override def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
     ev.isNull = "false"
     s"final ${ctx.javaType(dataType)} ${ev.value} = " +
-      "org.apache.spark.rdd.SqlNewHadoopRDD.getInputFileName();"
+      "org.apache.spark.rdd.SqlNewHadoopRDDState.getInputFileName();"
   }
 
 }
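
The substance of the fix is the retargeted call in the generated Java: getInputFileName() now resolves on a dedicated state holder, org.apache.spark.rdd.SqlNewHadoopRDDState, rather than on the RDD class itself. The diff does not show the holder's body; a minimal sketch, assuming it keeps the current file name in a per-thread UTF8String, would look like:

import org.apache.spark.unsafe.types.UTF8String

// Hedged sketch of the state holder the generated code calls into; the field
// and the thread-local default below are assumptions, not the committed code.
object SqlNewHadoopRDDState {
  // Each task thread records the name of the file its input split is reading.
  private val inputFileName = new InheritableThreadLocal[UTF8String] {
    override protected def initialValue(): UTF8String = UTF8String.fromString("")
  }

  def getInputFileName(): UTF8String = inputFileName.get()

  def setInputFileName(file: String): Unit =
    inputFileName.set(UTF8String.fromString(file))

  def unsetInputFileName(): Unit = inputFileName.remove()
}

Keeping this state in a small standalone object lets generated code reference it by a stable fully-qualified name, without depending on the RDD implementation class.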

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala

Lines changed: 13 additions & 13 deletions
@@ -38,9 +38,9 @@ import org.apache.spark.{Partition => SparkPartition, _}
 
 
 private[spark] class SqlNewHadoopPartition(
-  rddId: Int,
-  val index: Int,
-  rawSplit: InputSplit with Writable)
+    rddId: Int,
+    val index: Int,
+    rawSplit: InputSplit with Writable)
   extends SparkPartition {
 
   val serializableHadoopSplit = new SerializableWritable(rawSplit)
@@ -62,13 +62,13 @@ private[spark] class SqlNewHadoopPartition(
  * changes based on [[org.apache.spark.rdd.HadoopRDD]].
  */
 private[spark] class SqlNewHadoopRDD[V: ClassTag](
-  sqlContext: SQLContext,
-  broadcastedConf: Broadcast[SerializableConfiguration],
-  @transient private val initDriverSideJobFuncOpt: Option[Job => Unit],
-  initLocalJobFuncOpt: Option[Job => Unit],
-  inputFormatClass: Class[_ <: InputFormat[Void, V]],
-  valueClass: Class[V])
-    extends RDD[V](sqlContext.sparkContext, Nil)
+    sqlContext: SQLContext,
+    broadcastedConf: Broadcast[SerializableConfiguration],
+    @transient private val initDriverSideJobFuncOpt: Option[Job => Unit],
+    initLocalJobFuncOpt: Option[Job => Unit],
+    inputFormatClass: Class[_ <: InputFormat[Void, V]],
+    valueClass: Class[V])
+  extends RDD[V](sqlContext.sparkContext, Nil)
   with SparkHadoopMapReduceUtil
   with Logging {
 
@@ -281,9 +281,9 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
  * the given function rather than the index of the partition.
  */
 private[spark] class NewHadoopMapPartitionsWithSplitRDD[U: ClassTag, T: ClassTag](
-  prev: RDD[T],
-  f: (InputSplit, Iterator[T]) => Iterator[U],
-  preservesPartitioning: Boolean = false)
+    prev: RDD[T],
+    f: (InputSplit, Iterator[T]) => Iterator[U],
+    preservesPartitioning: Boolean = false)
   extends RDD[U](prev) {
 
   override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None
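
All three hunks in this file are indentation cleanups: each removed/added pair carries the same text and differs only in leading whitespace, so the behavioral change of the commit is confined to the codegen fix above. That fix is exercised whenever a query asks for the current input file; a hedged usage sketch (1.6-era input_file_name function, placeholder path, sqlContext assumed in scope):

import org.apache.spark.sql.functions.input_file_name

// Read a Parquet table and ask each row which file it came from; evaluating
// input_file_name() runs the generated code patched in this commit.
// "/tmp/events" is a placeholder path, and `sqlContext` is assumed in scope.
val df = sqlContext.read.parquet("/tmp/events")
df.select(input_file_name().as("source_file")).distinct().show()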
