@@ -25,7 +25,7 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.json.JacksonUtils.nextUntil
import org.apache.spark.sql.catalyst.json.JSONOptions
import org.apache.spark.sql.catalyst.util.PermissiveMode
import org.apache.spark.sql.catalyst.util.{DropMalformedMode, FailFastMode, ParseMode, PermissiveMode}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils

@@ -41,7 +41,7 @@ private[sql] object JsonInferSchema {
json: RDD[T],
configOptions: JSONOptions,
createParser: (JsonFactory, T) => JsonParser): StructType = {
val shouldHandleCorruptRecord = configOptions.parseMode == PermissiveMode
val parseMode = configOptions.parseMode
val columnNameOfCorruptRecord = configOptions.columnNameOfCorruptRecord

// perform schema inference on each row and merge afterwards
@@ -55,20 +55,24 @@ private[sql] object JsonInferSchema {
Some(inferField(parser, configOptions))
}
} catch {
case _: JsonParseException if shouldHandleCorruptRecord =>
Some(StructType(Seq(StructField(columnNameOfCorruptRecord, StringType))))
case _: JsonParseException =>
None
case e @ (_: RuntimeException | _: JsonProcessingException) => parseMode match {
case PermissiveMode =>
Some(StructType(Seq(StructField(columnNameOfCorruptRecord, StringType))))
case DropMalformedMode =>
None
case FailFastMode =>
throw e
}
}
}
}.fold(StructType(Seq()))(
compatibleRootType(columnNameOfCorruptRecord, shouldHandleCorruptRecord))
}.fold(StructType(Nil))(
compatibleRootType(columnNameOfCorruptRecord, parseMode))

canonicalizeType(rootType) match {
case Some(st: StructType) => st
case _ =>
// canonicalizeType erases all empty structs, including the only one we want to keep
StructType(Seq())
StructType(Nil)
}
}

@@ -217,26 +221,43 @@
}
}

private def withParseMode(
Contributor: Shall we embed this method in withCorruptField?

HyukjinKwon (Member, author): Sure.
struct: StructType,
other: DataType,
columnNameOfCorruptRecords: String,
parseMode: ParseMode) = parseMode match {
// If we see any other data type at the root level, we get records that cannot be
// parsed. So, we use the struct as the data type and add the corrupt field to the schema.
case PermissiveMode => withCorruptField(struct, columnNameOfCorruptRecords)

// If corrupt record handling is disabled we retain the valid schema and discard the other.
case DropMalformedMode => struct

// If `other` is not a struct type, consider it malformed and throw an exception.
case FailFastMode =>
Contributor: When will we hit this branch? Seems never?

HyukjinKwon (Member, author), Apr 2, 2017: It looks possible to reach this line. I added a test at line 1936 in JsonSuite. In more detail, if a record is valid JSON but is not an object or an array of objects, a non-StructType is inferred for it. If one of the inferred types is not a struct type and FAILFAST mode is enabled, we hit this line.

I just checked locally: [screenshot, 2017-04-02 10:46:39]
throw new RuntimeException("Failed to infer a common schema. Struct types are expected" +
s" but ${other.catalogString} was found.")
}
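
For reference, a rough sketch of what embedding withParseMode into withCorruptField (as suggested above) might look like inside JsonInferSchema. This is illustrative only: the PERMISSIVE branch merely approximates the existing corrupt-field handling, and the names and details are not the final code.

  // Hypothetical merged helper: the parse-mode dispatch folded into withCorruptField.
  private def withCorruptField(
      struct: StructType,
      other: DataType,
      columnNameOfCorruptRecords: String,
      parseMode: ParseMode): StructType = parseMode match {
    case PermissiveMode =>
      // Keep the struct and make sure it carries the corrupt-record column.
      if (struct.fieldNames.contains(columnNameOfCorruptRecords)) {
        struct
      } else {
        StructType(StructField(columnNameOfCorruptRecords, StringType) +: struct.fields)
      }
    case DropMalformedMode =>
      // Retain the valid struct schema and discard the non-struct type.
      struct
    case FailFastMode =>
      // A non-struct root type means some records cannot be parsed as objects.
      throw new RuntimeException("Failed to infer a common schema. Struct types are expected" +
        s" but ${other.catalogString} was found.")
  }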

/**
* Remove top-level ArrayType wrappers and merge the remaining schemas
*/
private def compatibleRootType(
columnNameOfCorruptRecords: String,
shouldHandleCorruptRecord: Boolean): (DataType, DataType) => DataType = {
parseMode: ParseMode): (DataType, DataType) => DataType = {
// Since we support array of json objects at the top level,
// we need to check the element type and find the root level data type.
case (ArrayType(ty1, _), ty2) =>
compatibleRootType(columnNameOfCorruptRecords, shouldHandleCorruptRecord)(ty1, ty2)
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
case (ty1, ArrayType(ty2, _)) =>
compatibleRootType(columnNameOfCorruptRecords, shouldHandleCorruptRecord)(ty1, ty2)
// If we see any other data type at the root level, we get records that cannot be
// parsed. So, we use the struct as the data type and add the corrupt field to the schema.
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
// Discard null/empty documents
case (struct: StructType, NullType) => struct
case (NullType, struct: StructType) => struct
case (struct: StructType, o) if !o.isInstanceOf[StructType] && shouldHandleCorruptRecord =>
withCorruptField(struct, columnNameOfCorruptRecords)
case (o, struct: StructType) if !o.isInstanceOf[StructType] && shouldHandleCorruptRecord =>
withCorruptField(struct, columnNameOfCorruptRecords)
case (struct: StructType, o) if !o.isInstanceOf[StructType] =>
withParseMode(struct, o, columnNameOfCorruptRecords, parseMode)
case (o, struct: StructType) if !o.isInstanceOf[StructType] =>
withParseMode(struct, o, columnNameOfCorruptRecords, parseMode)
// If we get anything else, we call compatibleType.
// Usually, when we reach here, ty1 and ty2 are two StructTypes.
case (ty1, ty2) => compatibleType(ty1, ty2)
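
To illustrate how the three modes are now meant to behave during schema inference, including when the FAILFAST branch discussed above is reached, here is a small, hypothetical spark-shell style sketch. The paths and sample data are made up; only the quoted FAILFAST message comes from the hunk above.

import java.nio.file.Files
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").getOrCreate()
import spark.implicits._

val path = Files.createTempDirectory("json-modes").resolve("data").toString
// One proper JSON object plus one record that is valid JSON but not an object.
Seq("""{"field": "value"}""", "1").toDF("value").write.text(path)

// PERMISSIVE (the default): the schema keeps `field` and gains the corrupt-record column.
spark.read.json(path).printSchema()

// DROPMALFORMED: the record that is not a JSON object is dropped; the schema is just `field`.
spark.read.option("mode", "DROPMALFORMED").json(path).printSchema()

// FAILFAST: schema inference itself is expected to fail with
// "Failed to infer a common schema. Struct types are expected but ... was found."
spark.read.option("mode", "FAILFAST").json(path).printSchema()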
@@ -1041,7 +1041,6 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
spark.read
.option("mode", "FAILFAST")
.json(corruptRecords)
.collect()
HyukjinKwon (Member, author): I removed this line to make sure the exception happens during schema inference. I think this is not directly related to the change here, though.
}
assert(exceptionOne.getMessage.contains("JsonParseException"))
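
Regarding the comment above about dropping collect(), a minimal sketch of the point (made-up records; it assumes a test context like this suite with spark.implicits._ and ScalaTest's intercept in scope, plus the Dataset[String] overload of DataFrameReader.json): in FAILFAST mode the schema-inference job started by spark.read.json(...) already scans the input, so the exception surfaces without running any action.

    import org.apache.spark.SparkException
    import org.apache.spark.sql.types.{LongType, StructType}

    val malformed = Seq("""{"a": 1}""", """{"a":""").toDS()  // the second record is broken JSON

    // Fails while the schema is being inferred -- no collect() needed.
    val inferenceError = intercept[SparkException] {
      spark.read.option("mode", "FAILFAST").json(malformed)
    }
    assert(inferenceError.getMessage.contains("JsonParseException"))

    // With an explicit schema there is no inference step, so the read itself stays lazy
    // and a parse failure would only surface once an action runs over the data.
    val withSchema = spark.read
      .option("mode", "FAILFAST")
      .schema(new StructType().add("a", LongType))
      .json(malformed)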

@@ -1082,6 +1081,18 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
assert(jsonDFTwo.schema === schemaTwo)
}

test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
val schema = new StructType().add("dummy", StringType)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDF = spark.read
.option("mode", "DROPMALFORMED")
.json(additionalCorruptRecords)
checkAnswer(
jsonDF,
Row("test"))
assert(jsonDF.schema === schema)
}

test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
val schema = StructType(
StructField("a", StringType, true) ::
@@ -1882,6 +1893,24 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
}
}

test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)

additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)

val jsonDF = spark.read.option("wholeFile", true).option("mode", "DROPMALFORMED").json(path)
checkAnswer(jsonDF, Seq(Row("test")))
}
}

test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
@@ -1903,9 +1932,8 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
.option("wholeFile", true)
.option("mode", "FAILFAST")
.json(path)
.collect()
}
assert(exceptionOne.getMessage.contains("Failed to parse a value"))
assert(exceptionOne.getMessage.contains("Failed to infer a common schema"))
HyukjinKwon (Member, author): To my knowledge, this test case previously threw the exception while actually parsing the data. Now I intend it to check the exception from schema inference, since the data-parsing case is covered below.
val exceptionTwo = intercept[SparkException] {
spark.read