docs/sql-programming-guide.md (7 changes: 0 additions & 7 deletions)
@@ -924,13 +924,6 @@ Configuration of Parquet can be done using the `setConf` method on `SparkSession
 flag tells Spark SQL to interpret INT96 data as a timestamp to provide compatibility with these systems.
   </td>
 </tr>
-<tr>
-  <td><code>spark.sql.parquet.cacheMetadata</code></td>
-  <td>true</td>
-  <td>
-    Turns on caching of Parquet schema metadata. Can speed up querying of static data.
-  </td>
-</tr>
 <tr>
   <td><code>spark.sql.parquet.compression.codec</code></td>
   <td>snappy</td>
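For context, the options in this documentation table are ordinary session configurations. A minimal sketch of how a user sets and reads one of the remaining keys, assuming an existing SparkSession named `spark` (after this change, the removed `spark.sql.parquet.cacheMetadata` key simply has no entry here to set):

// Set a Parquet option shown in the table above on a live session.
spark.conf.set("spark.sql.parquet.compression.codec", "snappy")

// Read it back; returns "snappy" here.
spark.conf.get("spark.sql.parquet.compression.codec")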
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala (7 changes: 0 additions & 7 deletions)
@@ -284,11 +284,6 @@ object SQLConf {
     .booleanConf
     .createWithDefault(false)
 
-  val PARQUET_CACHE_METADATA = buildConf("spark.sql.parquet.cacheMetadata")
-    .doc("Turns on caching of Parquet schema metadata. Can speed up querying of static data.")
-    .booleanConf
-    .createWithDefault(true)
-
   val PARQUET_COMPRESSION = buildConf("spark.sql.parquet.compression.codec")
     .doc("Sets the compression codec use when writing Parquet files. Acceptable values include: " +
       "uncompressed, snappy, gzip, lzo.")
@@ -1010,8 +1005,6 @@ class SQLConf extends Serializable with Logging {
 
   def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
 
-  def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
-
   def parquetVectorizedReaderEnabled: Boolean = getConf(PARQUET_VECTORIZED_READER_ENABLED)
 
   def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)
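For readers unfamiliar with the SQLConf pattern being deleted above: each entry is declared once with `buildConf` inside `object SQLConf` and then exposed through a typed accessor on `class SQLConf`. A minimal sketch of that two-part pattern, using a hypothetical key (`spark.sql.parquet.exampleFlag` is made up for illustration; only the shape mirrors the removed entry):

// Declaration side, inside object SQLConf: register the key, its doc string,
// its type, and its default value. The key name here is hypothetical.
val PARQUET_EXAMPLE_FLAG = buildConf("spark.sql.parquet.exampleFlag")
  .doc("A hypothetical boolean flag, shown only to illustrate the pattern.")
  .booleanConf
  .createWithDefault(true)

// Accessor side, inside class SQLConf: a typed getter that callers use
// instead of looking up the raw string key.
def parquetExampleFlag: Boolean = getConf(PARQUET_EXAMPLE_FLAG)

Removing a config therefore touches both halves, which is exactly what the two hunks above do for PARQUET_CACHE_METADATA and parquetCacheMetadata.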