Skip to content

Commit ed2d8d0

Browse files
committed
[HUDI-5327] Fix spark stages when using row writer
1 parent 4a825dc commit ed2d8d0

1 file changed

Lines changed: 3 additions & 2 deletions

File tree

hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/HoodieDatasetBulkInsertHelper.scala

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ import org.apache.hudi.common.engine.TaskContextSupplier
 import org.apache.hudi.common.model.{HoodieRecord, HoodieRecordPayload}
 import org.apache.hudi.common.util.ReflectionUtils
 import org.apache.hudi.config.HoodieWriteConfig
+import org.apache.hudi.data.HoodieJavaRDD
 import org.apache.hudi.index.SparkHoodieIndexFactory
 import org.apache.hudi.keygen.{BuiltinKeyGenerator, SparkKeyGeneratorInterface}
 import org.apache.hudi.table.{BulkInsertPartitioner, HoodieTable}
@@ -150,8 +151,8 @@ object HoodieDatasetBulkInsertHelper extends Logging {
       }

       writer.getWriteStatuses.asScala.map(_.toWriteStatus).iterator
-    }).collect()
-    table.getContext.parallelize(writeStatuses.toList.asJava)
+    })
+    HoodieJavaRDD.of(writeStatuses)
   }

   private def dedupeRows(rdd: RDD[InternalRow], schema: StructType, preCombineFieldRef: String, isGlobalIndex: Boolean): RDD[InternalRow] = {

0 commit comments

Comments
 (0)