
Commit cd09782

paulmckrcu authored and intel-lab-lkp committed
rcu: Don't invoke try_invoke_on_locked_down_task() with irqs disabled
The try_invoke_on_locked_down_task() function requires that interrupts be
enabled, but it is called with interrupts disabled from
rcu_print_task_stall(), resulting in an "IRQs not enabled as expected"
diagnostic.  This commit therefore updates rcu_print_task_stall() to
accumulate a list of the first few tasks while holding the current leaf
rcu_node structure's ->lock, then releases that lock and only then uses
try_invoke_on_locked_down_task() to attempt to obtain per-task detailed
information.  Of course, as soon as ->lock is released, the task might
exit, so the get_task_struct() function is used to prevent the task
structure from going away in the meantime.

Link: https://lore.kernel.org/lkml/[email protected]/
Reported-by: [email protected]
Reported-by: [email protected]
Tested-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 6f72faf commit cd09782
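
For readers skimming the diff below, the fix boils down to a two-phase pattern: take references on a bounded number of blocked tasks while the leaf rcu_node structure's ->lock is held (interrupts disabled), drop the lock so interrupts are enabled again, and only then probe each task. The fragment below is a minimal sketch of that pattern, not the patch itself; it assumes the surrounding rcu_print_task_stall() context (rnp, rscr) shown in the actual diff further down, and it uses a plain countdown loop rather than the patch's exact loop form.

	struct task_struct *ts[8];	/* Bounded snapshot of blocked tasks. */
	struct task_struct *t;
	struct rcu_stall_chk_rdr rscr;
	unsigned long flags;
	int i = 0;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);		/* Interrupts now disabled. */
	list_for_each_entry(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);		/* Pin t so it cannot go away after unlock. */
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);	/* Interrupts enabled again. */

	while (i) {
		t = ts[--i];
		/* Safe here: try_invoke_on_locked_down_task() requires interrupts enabled. */
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		put_task_struct(t);		/* Drop the reference taken above. */
	}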

File tree: 1 file changed (+17, -5 lines)


kernel/rcu/tree_stall.h

Lines changed: 17 additions & 5 deletions
@@ -249,13 +249,16 @@ static bool check_slow_task(struct task_struct *t, void *arg)
 
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
+ * sections, printing out the tid of each of the first few of them.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
+	int i = 0;
 	int ndetected = 0;
 	struct rcu_stall_chk_rdr rscr;
 	struct task_struct *t;
+	struct task_struct *ts[8];
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
 		return 0;
@@ -264,6 +267,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		get_task_struct(t);
+		ts[i++] = t;
+		if (i >= ARRAY_SIZE(ts))
+			break;
+	}
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	for (i--; i; i--) {
+		t = ts[i];
 		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
 			pr_cont(" P%d", t->pid);
 		else
@@ -273,6 +284,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 				".q"[rscr.rs.b.need_qs],
 				".e"[rscr.rs.b.exp_hint],
 				".l"[rscr.on_blkd_list]);
+		put_task_struct(t);
 		ndetected++;
 	}
 	pr_cont("\n");
@@ -293,8 +305,9 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	return 0;
 }
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -472,15 +485,14 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
 	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 					print_cpu_stall_info(cpu);
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
 	}
 
 	for_each_possible_cpu(cpu)
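
A side effect of the change is a lock-handoff convention: print_other_cpu_stall() still acquires rnp->lock, but rcu_print_task_stall() is now the function that releases it, hence the __releases(rnp->lock) sparse annotation and the unlock added to the !CONFIG_PREEMPT_RCU stub. As a generic illustration of that caller-locks/callee-unlocks pattern (the function names below are hypothetical, not from the patch):

/* Hypothetical example of the caller-locks/callee-unlocks handoff. */
static int report_and_unlock(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)	/* Tells sparse this function returns with the lock released. */
{
	int n = 0;

	/* ... work that still needs the lock goes here ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* ... work that must run with interrupts enabled goes here ... */
	return n;
}

static void caller(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	report_and_unlock(rnp, flags);	/* Releases rnp->lock on the caller's behalf. */
}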
