File tree (Expand / Collapse) — 1 file changed: +5 −3 lines
core/src/main/scala/org/apache/spark/scheduler — 1 file changed: +5 −3 lines

Original file line number | Diff line number | Diff line change
@@ -447,7 +447,7 @@ private[spark] class TaskSchedulerImpl(
447 447           abortTimer.schedule(
448 448             createUnschedulableTaskSetAbortTimer(taskSet, taskIndex), timeout)
449 449         }
450     -       case _ => // Abort Immediately
    450 +       case None => // Abort Immediately
451 451           logInfo("Cannot schedule any task because of complete blacklisting. No idle" +
452 452             s" executors can be found to kill. Aborting $taskSet.")
453 453           taskSet.abortSinceCompletelyBlacklisted(taskIndex)
@@ -456,8 +456,10 @@ private[spark] class TaskSchedulerImpl(
456 456       } else {
457 457         // We want to defer killing any taskSets as long as we have a non blacklisted executor
458 458         // which can be used to schedule a task from any active taskSets. This ensures that the
459     -       // job can make progress and if we encounter a flawed taskSet it will eventually either
460     -       // fail or abort due to being completely blacklisted.
    459 +       // job can make progress.
    460 +       // Note: It is theoretically possible that a taskSet never gets scheduled on a
    461 +       // non-blacklisted executor and the abort timer doesn't kick in because of a constant
    462 +       // submission of new TaskSets. See the PR for more details.
461 463         if (unschedulableTaskSetToExpiryTime.nonEmpty) {
462 464           logInfo("Clearing the expiry times for all unschedulable taskSets as a task was " +
463 465             "recently scheduled.")
You can’t perform that action at this time.
0 commit comments