diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index ee231a934a3a..52d6b9d73567 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -924,13 +924,6 @@ Configuration of Parquet can be done using the `setConf` method on `SparkSession
     flag tells Spark SQL to interpret INT96 data as a timestamp to provide compatibility with these systems.
   </td>
 </tr>
-<tr>
-  <td><code>spark.sql.parquet.cacheMetadata</code></td>
-  <td>true</td>
-  <td>
-    Turns on caching of Parquet schema metadata. Can speed up querying of static data.
-  </td>
-</tr>
 <tr>
   <td><code>spark.sql.parquet.compression.codec</code></td>
   <td>snappy</td>
@@ -1587,6 +1580,9 @@ options.
    Note that this is different from the Hive behavior.
  - As a result, `DROP TABLE` statements on those tables will not remove the data.
 
+ - `spark.sql.parquet.cacheMetadata` is no longer used.
+   See [SPARK-13664](https://issues.apache.org/jira/browse/SPARK-13664) for details.
+
 ## Upgrading From Spark SQL 1.5 to 1.6
 
  - From Spark 1.6, by default the Thrift server runs in multi-session mode. Which means each JDBC/ODBC
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index c407874381ac..f588abd463bb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -284,11 +284,6 @@ object SQLConf {
     .booleanConf
     .createWithDefault(false)
 
-  val PARQUET_CACHE_METADATA = buildConf("spark.sql.parquet.cacheMetadata")
-    .doc("Turns on caching of Parquet schema metadata. Can speed up querying of static data.")
-    .booleanConf
-    .createWithDefault(true)
-
   val PARQUET_COMPRESSION = buildConf("spark.sql.parquet.compression.codec")
     .doc("Sets the compression codec use when writing Parquet files. Acceptable values include: " +
       "uncompressed, snappy, gzip, lzo.")
@@ -1010,8 +1005,6 @@ class SQLConf extends Serializable with Logging {
 
   def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
 
-  def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
-
   def parquetVectorizedReaderEnabled: Boolean = getConf(PARQUET_VECTORIZED_READER_ENABLED)
 
   def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)
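
For downstream users, the practical effect of this removal is that setting the key becomes inert. A minimal sketch of that behavior follows, assuming a local `SparkSession`; the object name and app name are hypothetical and not part of this patch. Spark's `RuntimeConfig` accepts arbitrary `spark.sql.*` keys as opaque strings, so existing jobs that set `spark.sql.parquet.cacheMetadata` should keep running, but no code path reads the value once this patch lands:

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical migration check; illustrative only, not part of this patch.
object CacheMetadataNoOpCheck {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("cacheMetadata-noop-sketch")
      .getOrCreate()

    // Accepted as an opaque string key; after this patch nothing in
    // Spark SQL consults it (see SPARK-13664 for why it became unused).
    spark.conf.set("spark.sql.parquet.cacheMetadata", "true")

    // The value round-trips, but has no effect on Parquet reads.
    println(spark.conf.get("spark.sql.parquet.cacheMetadata")) // prints: true

    spark.stop()
  }
}
```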