Skip to content

Commit 5b49ae0

Browse files
committed
fix compatibility
1 parent 223ce2c commit 5b49ae0

2 files changed

Lines changed: 3 additions & 4 deletions

File tree

sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -106,11 +106,10 @@ public class VectorizedParquetRecordReader extends SpecificParquetRecordReaderBase
106106
/**
107107
* Implementation of RecordReader API.
108108
*/
109-
public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext,
110-
Configuration conf)
109+
public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
111110
throws IOException, InterruptedException, UnsupportedOperationException {
112111
super.initialize(inputSplit, taskAttemptContext);
113-
this.conf = conf;
112+
this.conf = taskAttemptContext.getConfiguration();
114113
initializeInternal();
115114
}
116115

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -373,7 +373,7 @@ class ParquetFileFormat
373373
}
374374
val parquetReader = if (enableVectorizedReader) {
375375
val vectorizedReader = new VectorizedParquetRecordReader()
376-
vectorizedReader.initialize(split, hadoopAttemptContext, broadcastedHadoopConf.value.value)
376+
vectorizedReader.initialize(split, hadoopAttemptContext)
377377
logDebug(s"Appending $partitionSchema ${file.partitionValues}")
378378
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
379379
if (returningBatch) {

0 commit comments

Comments (0)