Skip to content

Commit a97a0a8

Browse files
committed
Support for writing CSV with a single function call
1 parent 318bf41 commit a97a0a8

2 files changed

Lines changed: 30 additions & 2 deletions

File tree

sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -464,6 +464,12 @@ final class DataFrameWriter private[sql](df: DataFrame) {
464464
* format("parquet").save(path)
465465
* }}}
466466
*
467+
* You can set the following Parquet-specific options for writing Parquet files:
468+
* <li>`compression` or `codec` (default `null`): compression codec to use when saving to file.
469+
* This should be the fully qualified name of a class implementing
470+
* [[org.apache.hadoop.io.compress.CompressionCodec]] or one of the known case-insensitive
471+
* shortened names (`bzip2`, `gzip`, `lz4`, and `snappy`). </li>
472+
*
467473
* @since 1.4.0
468474
*/
469475
def parquet(path: String): Unit = format("parquet").save(path)
@@ -492,10 +498,33 @@ final class DataFrameWriter private[sql](df: DataFrame) {
492498
* df.write().text("/path/to/output")
493499
* }}}
494500
*
501+
* You can set the following options for writing text files:
502+
* <li>`compression` or `codec` (default `null`): compression codec to use when saving to file.
503+
* This should be the fully qualified name of a class implementing
504+
* [[org.apache.hadoop.io.compress.CompressionCodec]] or one of the known case-insensitive
505+
* shortened names (`bzip2`, `gzip`, `lz4`, and `snappy`). </li>
506+
*
495507
* @since 1.6.0
496508
*/
497509
def text(path: String): Unit = format("text").save(path)
498510

511+
/**
512+
* Saves the content of the [[DataFrame]] in CSV format at the specified path.
513+
* This is equivalent to:
514+
* {{{
515+
* format("csv").save(path)
516+
* }}}
517+
*
518+
* You can set the following CSV-specific options for writing CSV files:
519+
* <li>`compression` or `codec` (default `null`): compression codec to use when saving to file.
520+
* This should be the fully qualified name of a class implementing
521+
* [[org.apache.hadoop.io.compress.CompressionCodec]] or one of the known case-insensitive
522+
* shortened names (`bzip2`, `gzip`, `lz4`, and `snappy`). </li>
523+
*
524+
* @since 2.0.0
525+
*/
526+
def csv(path: String): Unit = format("csv").save(path)
527+
499528
///////////////////////////////////////////////////////////////////////////////////////
500529
// Builder pattern config options
501530
///////////////////////////////////////////////////////////////////////////////////////

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -268,9 +268,8 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
268268
.load(testFile(carsFile))
269269

270270
cars.coalesce(1).write
271-
.format("csv")
272271
.option("header", "true")
273-
.save(csvDir)
272+
.csv(csvDir)
274273

275274
val carsCopy = sqlContext.read
276275
.format("csv")

0 commit comments

Comments
 (0)