Skip to content

Commit 7ee4cf1

Browse files
committed
Fix indent
1 parent afd5c0f commit 7ee4cf1

File tree

2 files changed

+19
-19
lines changed

2 files changed

+19
-19
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -647,38 +647,38 @@ object StreamMetadata extends Logging {
647647
/** Read the metadata from file if it exists */
648648
def read(metadataFile: Path, hadoopConf: Configuration): Option[StreamMetadata] = {
649649
val fs = FileSystem.get(hadoopConf)
650-
if (fs.exists(metadataFile)) {
651-
var input: FSDataInputStream = null
652-
try {
653-
input = fs.open(metadataFile)
654-
val reader = new InputStreamReader(input, StandardCharsets.UTF_8)
655-
val metadata = Serialization.read[StreamMetadata](reader)
656-
Some(metadata)
657-
} catch {
658-
case NonFatal(e) =>
659-
logError(s"Error reading stream metadata from $metadataFile", e)
660-
throw e
661-
} finally {
662-
IOUtils.closeQuietly(input)
663-
}
664-
} else None
650+
if (fs.exists(metadataFile)) {
651+
var input: FSDataInputStream = null
652+
try {
653+
input = fs.open(metadataFile)
654+
val reader = new InputStreamReader(input, StandardCharsets.UTF_8)
655+
val metadata = Serialization.read[StreamMetadata](reader)
656+
Some(metadata)
657+
} catch {
658+
case NonFatal(e) =>
659+
logError(s"Error reading stream metadata from $metadataFile", e)
660+
throw e
661+
} finally {
662+
IOUtils.closeQuietly(input)
663+
}
664+
} else None
665665
}
666666

667-
/** Write metadata to file, overwrite if it exists */
667+
/** Write metadata to file */
668668
def write(
669669
metadata: StreamMetadata,
670670
metadataFile: Path,
671671
hadoopConf: Configuration): Unit = {
672672
var output: FSDataOutputStream = null
673673
try {
674674
val fs = FileSystem.get(hadoopConf)
675-
output = fs.create(metadataFile, true) // overwrite if exists
675+
output = fs.create(metadataFile)
676676
val writer = new OutputStreamWriter(output)
677677
Serialization.write(metadata, writer)
678678
writer.close()
679679
} catch {
680680
case NonFatal(e) =>
681-
logError(s"Error writing stream metedata $metadata to $metadataFile", e)
681+
logError(s"Error writing stream metadata $metadata to $metadataFile", e)
682682
throw e
683683
} finally {
684684
IOUtils.closeQuietly(output)

sql/core/src/main/scala/org/apache/spark/sql/streaming/progress.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ class StateOperatorProgress private[sql](
5252
*
5353
* @param id An unique query id that persists across restarts. See `StreamingQuery.id()`.
5454
* @param runId A query id that is unique for every start/restart. See `StreamingQuery.runId()`.
55-
* @param name User-specified name of the query.
55+
* @param name User-specified name of the query, null if not specified.
5656
* @param timestamp Timestamp (ms) of the beginning of the trigger.
5757
* @param batchId A unique id for the current batch of data being processed. Note that in the
5858
* case of retries after a failure a given batchId may be executed more than once.

0 commit comments

Comments
 (0)