Closed

64 commits
6ca7771
add insert overwrite local directory
janewangfb Aug 17, 2017
a975536
Add Unittests
janewangfb Aug 17, 2017
a15bf4e
fix local path
janewangfb Aug 17, 2017
9f596fd
Merge branch 'master' into port_local_directory
janewangfb Aug 17, 2017
b9db02e
fix style
janewangfb Aug 17, 2017
e516bec
Merge branch 'master' into port_local_directory
janewangfb Aug 18, 2017
e05624f
condense storage
janewangfb Aug 18, 2017
7f5664d
change InsertInto to InsertIntoTable
janewangfb Aug 18, 2017
d50b3a2
add InsertIntoDirectory
janewangfb Aug 19, 2017
61a18a2
update insertInto
janewangfb Aug 19, 2017
4c19aaf
SQLQuerySuite passed
janewangfb Aug 19, 2017
47fde8a
fix comments
janewangfb Aug 19, 2017
068662a
Merge branch 'master' into port_local_directory
janewangfb Aug 21, 2017
da7065b
Add tableProdier
janewangfb Aug 21, 2017
7f4b488
Add InsertIntoDataSourceDirCommand
janewangfb Aug 21, 2017
051018e
Merge branch 'master' into port_local_directory
janewangfb Aug 21, 2017
73f605e
fix style
janewangfb Aug 21, 2017
8261b39
fix style
janewangfb Aug 21, 2017
163c124
Merge branch 'master' into port_local_directory
janewangfb Aug 22, 2017
0882dd1
Address gatorsmile's comments
janewangfb Aug 22, 2017
c813ad8
Use withTempDir
janewangfb Aug 22, 2017
bc5424c
line length
janewangfb Aug 22, 2017
675ffd7
merge master
janewangfb Aug 23, 2017
f36e933
Address gatorsmile's comment
janewangfb Aug 23, 2017
64f37f4
fix typo
janewangfb Aug 23, 2017
b2068ce
Merge branch 'master' into port_local_directory
janewangfb Aug 31, 2017
8ebe5e2
Address gatorsmile's comments
janewangfb Aug 31, 2017
51f9a0a
fix comments and style
janewangfb Aug 31, 2017
e2db5e1
add more comments
janewangfb Aug 31, 2017
7bb5c85
Merge branch 'master' into port_local_directory
janewangfb Sep 1, 2017
2e7a29b
Merge branch 'master' into port_local_directory
janewangfb Sep 1, 2017
7ccbde4
Address gatorsmile's comments
janewangfb Sep 2, 2017
52350e8
fix style
janewangfb Sep 2, 2017
2ec9947
address Tejas' comment
janewangfb Sep 3, 2017
05d9d20
Merge branch 'master' into port_local_directory
janewangfb Sep 5, 2017
392593b
check point
janewangfb Sep 5, 2017
511cfc3
checkpoint
janewangfb Sep 5, 2017
77948bb
Merge insertIntoTable and insertIntoDirectory
janewangfb Sep 5, 2017
fd9322c
refactoring temp dir
janewangfb Sep 6, 2017
e590847
add TODO for tmpPath
janewangfb Sep 6, 2017
d1b4ec8
Merge branch 'master' into port_local_directory
janewangfb Sep 6, 2017
dd6a418
use tmpPath
janewangfb Sep 6, 2017
c2c693c
fix style
janewangfb Sep 6, 2017
e9c88b5
add exists check
janewangfb Sep 6, 2017
62370fd
Merge branch 'master' into port_local_directory
janewangfb Sep 6, 2017
b00acdc
Merge branch 'master' into port_local_directory
janewangfb Sep 6, 2017
b64520b
fix build
janewangfb Sep 6, 2017
3aaf6e8
fix build failure
janewangfb Sep 6, 2017
b461e00
fix build failure
janewangfb Sep 6, 2017
e9c24de
address's gatorsmile's comment
janewangfb Sep 6, 2017
28fcb39
Merge branch 'master' into port_local_directory
janewangfb Sep 6, 2017
0c03a2b
address gatorsmile's comment
janewangfb Sep 6, 2017
6c24b1b
remove unused import
janewangfb Sep 6, 2017
c4ab411
merge master
janewangfb Sep 7, 2017
0ec103e
address gatorsmile's comments
janewangfb Sep 7, 2017
160c0ec
fix style
janewangfb Sep 7, 2017
95ebfd3
add more unittest
janewangfb Sep 7, 2017
4a5ff29
add multi insert
janewangfb Sep 7, 2017
c99872b
Merge branch 'master' into port_local_directory
janewangfb Sep 9, 2017
449249e
address gatorsmile's comments
janewangfb Sep 9, 2017
7919041
address viirya's comment
janewangfb Sep 9, 2017
aeb5d5e
address viirya's comment
janewangfb Sep 9, 2017
81382df
Merge branch 'master' into port_local_directory
janewangfb Sep 9, 2017
f93d57a
address viirya's comment
janewangfb Sep 9, 2017
@@ -240,6 +240,7 @@ query
insertInto
: INSERT OVERWRITE TABLE tableIdentifier (partitionSpec (IF NOT EXISTS)?)?
| INSERT INTO TABLE? tableIdentifier partitionSpec?
| INSERT OVERWRITE LOCAL? DIRECTORY path=STRING rowFormat? createFileFormat?
;

partitionSpecLocation
@@ -740,6 +741,7 @@ nonReserved
| AND | CASE | CAST | DISTINCT | DIV | ELSE | END | FUNCTION | INTERVAL | MACRO | OR | STRATIFY | THEN
| UNBOUNDED | WHEN
| DATABASE | SELECT | FROM | WHERE | HAVING | TO | TABLE | WITH | NOT | CURRENT_DATE | CURRENT_TIMESTAMP
| DIRECTORY
Member: Could you add it to TableIdentifierParserSuite?

Contributor Author: it is already in TableIdentifierParserSuite

;

SELECT: 'SELECT';
@@ -810,6 +812,7 @@ WITH: 'WITH';
VALUES: 'VALUES';
CREATE: 'CREATE';
TABLE: 'TABLE';
DIRECTORY: 'DIRECTORY';
VIEW: 'VIEW';
REPLACE: 'REPLACE';
INSERT: 'INSERT';
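
For reference, a hedged usage sketch (not part of this diff) of the syntax enabled by the grammar and lexer changes above; the session setup, view name, and output path are illustrative assumptions:

import org.apache.spark.sql.SparkSession

// Illustrative only: assumes a local session with Hive support on the classpath.
val spark = SparkSession.builder()
  .master("local[*]")
  .enableHiveSupport()
  .getOrCreate()

spark.range(10).createOrReplaceTempView("src")

// New syntax added by this PR: write query output directly to a (local) directory.
spark.sql(
  """INSERT OVERWRITE LOCAL DIRECTORY '/tmp/insert_dir_example'
    |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
    |STORED AS TEXTFILE
    |SELECT id, id * 2 AS doubled FROM src""".stripMargin)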
@@ -142,10 +142,14 @@ object UnsupportedOperationChecker {
"Distinct aggregations are not supported on streaming DataFrames/Datasets. Consider " +
"using approx_count_distinct() instead.")


Member: Nit: revert it back.

Contributor Author: reverted

case _: Command =>
throwError("Commands like CreateTable*, AlterTable*, Show* are not supported with " +
"streaming DataFrames/Datasets")

case _: InsertIntoDir =>
throwError("InsertIntoDir is not supported with streaming DataFrames/Datasets")

// mapGroupsWithState and flatMapGroupsWithState
case m: FlatMapGroupsWithState if m.isStreaming =>

@@ -176,7 +176,7 @@ class AstBuilder(conf: SQLConf) extends SqlBaseBaseVisitor[AnyRef] with Logging
/**
* Add an INSERT INTO [TABLE]/INSERT OVERWRITE TABLE operation to the logical plan.
*/
private def withInsertInto(
protected def withInsertInto(
ctx: InsertIntoContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val tableIdent = visitTableIdentifier(ctx.tableIdentifier)
@@ -18,7 +18,7 @@
package org.apache.spark.sql.catalyst.plans.logical

import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans._
@@ -359,6 +359,18 @@ case class InsertIntoTable(
override lazy val resolved: Boolean = false
}

case class InsertIntoDir(
gatorsmile (Member), Aug 21, 2017: This should be InsertOverwriteDir. Let us make it extensible. Keep the current name InsertIntoDir with another parameter overwrite: Boolean.

Contributor Author: ok. added.

Member: Could you add function descriptions and parameter descriptions?

Contributor Author: added

path: String,
isLocal: Boolean,
rowStorage: CatalogStorageFormat,
fileStorage: CatalogStorageFormat,
child: LogicalPlan)
extends LogicalPlan {

override def children: Seq[LogicalPlan] = child :: Nil
Member: Nit: We can simply extend UnaryNode and remove children.

Contributor Author: updated.

override def output: Seq[Attribute] = Seq.empty
Member: Set override lazy val resolved: Boolean = false

Contributor Author: updated

}
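
For readers of the diff, a hedged sketch (not from the PR) that spells out what each field of the new node holds at this revision; in practice the parser constructs it, and the path and child plan below are made up:

import org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.plans.logical.InsertIntoDir

val node = InsertIntoDir(
  path = "/tmp/out",                        // target directory from the statement
  isLocal = true,                           // true for INSERT OVERWRITE LOCAL DIRECTORY
  rowStorage = CatalogStorageFormat.empty,  // filled from a ROW FORMAT clause, if any
  fileStorage = CatalogStorageFormat.empty, // filled from a STORED AS clause, if any
  child = table("src").select('id))         // the query whose rows are written out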

/**
* A container for holding the view description(CatalogTable), and the output of the view. The
* child should be a logical plan parsed from the `CatalogTable.viewText`, should throw an error
@@ -26,11 +26,12 @@ import org.antlr.v4.runtime.tree.TerminalNode

import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.parser._
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, _}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.{CreateTable, _}
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf, VariableSubstitution}
@@ -1499,4 +1500,34 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
query: LogicalPlan): LogicalPlan = {
RepartitionByExpression(expressions, query, conf.numShufflePartitions)
}

/**
 * Add an INSERT INTO [TABLE]/INSERT OVERWRITE TABLE or INSERT OVERWRITE [LOCAL] DIRECTORY
* operation to the logical plan.
*/
protected override def withInsertInto(ctx: InsertIntoContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
Member: Indents

Contributor Author: fixed

val tableIdent = Option(ctx.tableIdentifier).map(visitTableIdentifier)
val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)

val dynamicPartitionKeys = partitionKeys.filter(_._2.isEmpty)
if (ctx.EXISTS != null && dynamicPartitionKeys.nonEmpty) {
throw new ParseException(s"Dynamic partitions do not support IF NOT EXISTS. Specified " +
"partitions with value: " + dynamicPartitionKeys.keys.mkString("[", ",", "]"), ctx)
}

validateRowFormatFileFormat(ctx.rowFormat, ctx.createFileFormat, ctx)
val rowStorage = Option(ctx.rowFormat).map(visitRowFormat)
.getOrElse(CatalogStorageFormat.empty)
val fileStorage = Option(ctx.createFileFormat).map(visitCreateFileFormat)
.getOrElse(CatalogStorageFormat.empty)

tableIdent match {
case Some(ti: TableIdentifier) =>
InsertIntoTable(UnresolvedRelation(ti), partitionKeys, query,
ctx.OVERWRITE != null, ctx.EXISTS != null)
case _ =>
InsertIntoDir(string(ctx.path), ctx.LOCAL != null, rowStorage, fileStorage, query)
}
}
}
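
A hedged sketch of how the overridden method above dispatches, parsing both INSERT forms with made-up names and printing the resulting logical plans:

// Assumes the `spark` session and `src` view from the earlier sketch.
val parser = spark.sessionState.sqlParser

// A table target keeps the existing InsertIntoTable node.
println(parser.parsePlan("INSERT OVERWRITE TABLE dst SELECT * FROM src"))

// A directory target has no tableIdentifier in the parse context, so the new
// InsertIntoDir node is built with the path, the LOCAL flag and the storage formats.
println(parser.parsePlan(
  "INSERT OVERWRITE LOCAL DIRECTORY '/tmp/out' STORED AS TEXTFILE SELECT * FROM src"))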
@@ -26,7 +26,7 @@ import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan, ScriptTransformation}
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, InsertIntoTable, LogicalPlan, ScriptTransformation}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.command.{CreateTableCommand, DDLUtils}
@@ -155,6 +155,9 @@ object HiveAnalysis extends Rule[LogicalPlan] {

case CreateTable(tableDesc, mode, Some(query)) if DDLUtils.isHiveTable(tableDesc) =>
CreateHiveTableAsSelectCommand(tableDesc, query, mode)

case InsertIntoDir(path, isLocal, rowStorage, fileStorage, child) =>
InsertIntoDirCommand(path, isLocal, rowStorage, fileStorage, child)
}
}

@@ -0,0 +1,136 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.hive.execution

import java.net.URI
import java.util.Properties

import scala.language.existentials

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.common.FileUtils
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.apache.hadoop.mapred._

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogStorageFormat
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.util.Utils


case class InsertIntoDirCommand(path: String,
isLocal: Boolean,
rowStorage: CatalogStorageFormat,
fileStorage: CatalogStorageFormat,
query: LogicalPlan) extends SaveAsHiveFile {

override def children: Seq[LogicalPlan] = query :: Nil

override def run(sparkSession: SparkSession, children: Seq[SparkPlan]): Seq[Row] = {
assert(children.length == 1)

val Array(cols, types) = children.head.output.foldLeft(Array("", "")) { case (r, a) =>
r(0) = r(0) + a.name + ","
r(1) = r(1) + a.dataType.catalogString + ":"
r
}

val properties = new Properties()
properties.put("columns", cols.dropRight(1))
properties.put("columns.types", types.dropRight(1))

val sqlContext = sparkSession.sqlContext

val defaultStorage: CatalogStorageFormat = {
val defaultStorageType =
sqlContext.conf.getConfString("hive.default.fileformat", "textfile")
val defaultHiveSerde = HiveSerDe.sourceToSerDe(defaultStorageType)
CatalogStorageFormat(
locationUri = None,
inputFormat = defaultHiveSerde.flatMap(_.inputFormat)
.orElse(Some("org.apache.hadoop.mapred.TextInputFormat")),
outputFormat = defaultHiveSerde.flatMap(_.outputFormat)
.orElse(Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")),
serde = defaultHiveSerde.flatMap(_.serde),
compressed = false,
properties = Map())
}

val pathUri = if (isLocal) Utils.resolveURI(path) else new URI(path)
val storage = CatalogStorageFormat(
locationUri = Some(pathUri),
inputFormat = fileStorage.inputFormat.orElse(defaultStorage.inputFormat),
outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
compressed = false,
properties = rowStorage.properties ++ fileStorage.properties)

properties.put(serdeConstants.SERIALIZATION_LIB,
storage.serde.getOrElse(classOf[LazySimpleSerDe].getName))

import scala.collection.JavaConverters._
properties.putAll(rowStorage.properties.asJava)
properties.putAll(fileStorage.properties.asJava)

var tableDesc = new TableDesc(
Utils.classForName(storage.inputFormat.get).asInstanceOf[Class[_ <: InputFormat[_, _]]],
Utils.classForName(storage.outputFormat.get),
properties
)

val hadoopConf = sparkSession.sessionState.newHadoopConf()
val jobConf = new JobConf(hadoopConf)

val targetPath = new Path(path)
val writeToPath =
if (isLocal) {
val localFileSystem = FileSystem.getLocal(jobConf)
val localPath = localFileSystem.makeQualified(targetPath)
if (localFileSystem.exists(localPath)) {
localFileSystem.delete(localPath, true)
}
localPath
} else {
val qualifiedPath = FileUtils.makeQualified(targetPath, hadoopConf)
val dfs = qualifiedPath.getFileSystem(jobConf)
if (dfs.exists(qualifiedPath)) {
dfs.delete(qualifiedPath, true)
} else {
dfs.mkdirs(qualifiedPath.getParent)
}
qualifiedPath
}

val fileSinkConf = new org.apache.spark.sql.hive.HiveShim.ShimFileSinkDesc(
writeToPath.toString, tableDesc, false)

saveAsHiveFile(
sparkSession = sparkSession,
plan = children.head,
hadoopConf = hadoopConf,
fileSinkConf = fileSinkConf,
outputLocation = path)

Seq.empty[Row]
}
}
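
A hedged end-to-end sketch of the behaviour implemented above, reusing the spark session and src view from the first sketch; the output path is made up. An existing local target directory is deleted before saveAsHiveFile writes new files, and with no ROW FORMAT/STORED AS clause the hive.default.fileformat text defaults shown above are used:

import java.io.File

val outDir = "/tmp/insert_dir_example"

// Re-running the statement overwrites the directory instead of failing.
spark.sql(
  s"""INSERT OVERWRITE LOCAL DIRECTORY '$outDir'
     |SELECT id FROM src""".stripMargin)

// Inspect the files written to the local directory.
new File(outDir).listFiles().foreach(f => println(f.getName))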

@@ -32,14 +32,12 @@ import org.apache.hadoop.hive.ql.exec.TaskRunner
import org.apache.hadoop.hive.ql.plan.TableDesc

import org.apache.spark.SparkException
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.{CommandUtils, DataWritingCommand}
import org.apache.spark.sql.execution.datasources.FileFormatWriter
import org.apache.spark.sql.execution.command.CommandUtils
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc}
import org.apache.spark.sql.hive.client.{HiveClientImpl, HiveVersion}
@@ -80,7 +78,7 @@ case class InsertIntoHiveTable(
partition: Map[String, Option[String]],
query: LogicalPlan,
overwrite: Boolean,
ifPartitionNotExists: Boolean) extends DataWritingCommand {
ifPartitionNotExists: Boolean) extends SaveAsHiveFile {

override def children: Seq[LogicalPlan] = query :: Nil

@@ -234,10 +232,9 @@ case class InsertIntoHiveTable(
override def run(sparkSession: SparkSession, children: Seq[SparkPlan]): Seq[Row] = {
assert(children.length == 1)

val sessionState = sparkSession.sessionState
val externalCatalog = sparkSession.sharedState.externalCatalog
val hiveVersion = externalCatalog.asInstanceOf[HiveExternalCatalog].client.version
val hadoopConf = sessionState.newHadoopConf()
val hadoopConf = sparkSession.sessionState.newHadoopConf()
val stagingDir = hadoopConf.get("hive.exec.stagingdir", ".hive-staging")
Member: How about keeping stagingDir?

Contributor Author: ok. added it back.

val scratchDir = hadoopConf.get("hive.exec.scratchdir", "/tmp/hive")

@@ -257,20 +254,6 @@
val tmpLocation =
getExternalTmpPath(tableLocation, hiveVersion, hadoopConf, stagingDir, scratchDir)
val fileSinkConf = new FileSinkDesc(tmpLocation.toString, tableDesc, false)
val isCompressed = hadoopConf.get("hive.exec.compress.output", "false").toBoolean

if (isCompressed) {
// Please note that isCompressed, "mapreduce.output.fileoutputformat.compress",
// "mapreduce.output.fileoutputformat.compress.codec", and
// "mapreduce.output.fileoutputformat.compress.type"
// have no impact on ORC because it uses table properties to store compression information.
hadoopConf.set("mapreduce.output.fileoutputformat.compress", "true")
fileSinkConf.setCompressed(true)
fileSinkConf.setCompressCodec(hadoopConf
.get("mapreduce.output.fileoutputformat.compress.codec"))
fileSinkConf.setCompressType(hadoopConf
.get("mapreduce.output.fileoutputformat.compress.type"))
}

val numDynamicPartitions = partition.values.count(_.isEmpty)
val numStaticPartitions = partition.values.count(_.nonEmpty)
Expand Down Expand Up @@ -332,29 +315,20 @@ case class InsertIntoHiveTable(
case _ => // do nothing since table has no bucketing
}

val committer = FileCommitProtocol.instantiate(
sparkSession.sessionState.conf.fileCommitProtocolClass,
jobId = java.util.UUID.randomUUID().toString,
outputPath = tmpLocation.toString)

val partitionAttributes = partitionColumnNames.takeRight(numDynamicPartitions).map { name =>
query.resolve(name :: Nil, sparkSession.sessionState.analyzer.resolver).getOrElse {
throw new AnalysisException(
s"Unable to resolve $name given [${query.output.map(_.name).mkString(", ")}]")
}.asInstanceOf[Attribute]
}

FileFormatWriter.write(
saveAsHiveFile(
sparkSession = sparkSession,
plan = children.head,
fileFormat = new HiveFileFormat(fileSinkConf),
committer = committer,
outputSpec = FileFormatWriter.OutputSpec(tmpLocation.toString, Map.empty),
hadoopConf = hadoopConf,
partitionColumns = partitionAttributes,
bucketSpec = None,
statsTrackers = Seq(basicWriteJobStatsTracker(hadoopConf)),
options = Map.empty)
fileSinkConf = fileSinkConf,
outputLocation = tmpLocation.toString,
partitionAttributes = partitionAttributes)

if (partition.nonEmpty) {
if (numDynamicPartitions > 0) {
Expand Down