Skip to content

Commit b0f7806

Browse files
Optimizing imports in ParquetTestData
1 parent 85fea2d commit b0f7806

1 file changed

Lines changed: 5 additions & 9 deletions

File tree

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTestData.scala

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,20 +19,16 @@ package org.apache.spark.sql.parquet
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
-import org.apache.hadoop.mapreduce.Job
 
+import parquet.example.data.{GroupWriter, Group}
+import parquet.example.data.simple.SimpleGroup
 import parquet.hadoop.ParquetWriter
-import parquet.hadoop.util.ContextUtil
+import parquet.hadoop.api.WriteSupport
+import parquet.hadoop.api.WriteSupport.WriteContext
+import parquet.io.api.RecordConsumer
 import parquet.schema.{MessageType, MessageTypeParser}
 
-import org.apache.spark.sql.catalyst.expressions.GenericRow
 import org.apache.spark.util.Utils
-import parquet.hadoop.metadata.CompressionCodecName
-import parquet.hadoop.api.WriteSupport
-import parquet.example.data.{GroupWriter, Group}
-import parquet.io.api.RecordConsumer
-import parquet.hadoop.api.WriteSupport.WriteContext
-import parquet.example.data.simple.SimpleGroup
 
 // Write support class for nested groups: ParquetWriter initializes GroupWriteSupport
 // with an empty configuration (it is after all not intended to be used in this way?)

0 commit comments

Comments (0)