@@ -354,16 +354,23 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);
 
 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;
 
-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
+}
+
+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
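
For context, queue_balance_callback() is the helper this series adds in kernel/sched/sched.h. Roughly (a sketch of its shape, not verified against this exact tree), it chains a per-CPU callback_head onto rq->balance_callback under the rq lock and silently ignores a head that is already queued, which is why push and pull each need their own head above:

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	/* A head can only carry one pending callback at a time. */
	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
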
@@ -2139,7 +2146,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 
-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }
 
 void __init init_sched_rt_class(void)
@@ -2160,8 +2167,6 @@ void __init init_sched_rt_class(void)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
@@ -2171,13 +2176,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
-		    /* Don't resched if we changed runqueues */
-		    push_rt_task(rq) && rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
+#endif /* CONFIG_SMP */
 	}
 }
 
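The point of the restructuring: push_rt_task() and pull_rt_task() can drop and re-take rq->lock (via double_lock_balance()), which is unsafe in the middle of a class switch or priority change. Queued callbacks instead run from the balance-callback loop in kernel/sched/core.c once the caller is done with the runqueue; on SMP the queued push also takes care of any needed preemption, so the direct resched_curr() survives only in the !CONFIG_SMP branch. From memory, the runner looks roughly like this (a sketch, not a quote from this tree):

static void __balance_callback(struct rq *rq)
{
	struct callback_head *head, *next;
	void (*func)(struct rq *rq);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	head = rq->balance_callback;
	rq->balance_callback = NULL;
	while (head) {
		func = (void (*)(struct rq *))head->func;
		next = head->next;
		head->next = NULL;
		head = next;

		/* func may drop and re-take rq->lock; that is safe here. */
		func(rq);
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
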
@@ -2198,14 +2202,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 	 * may need to pull tasks to this runqueue.
 	 */
 	if (oldprio < p->prio)
-		pull_rt_task(rq);
+		queue_pull_task(rq);
+
 	/*
 	 * If there's a higher priority task waiting to run
-	 * then reschedule. Note, the above pull_rt_task
-	 * can release the rq lock and p could migrate.
-	 * Only reschedule if p is still on the same runqueue.
+	 * then reschedule.
 	 */
-	if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+	if (p->prio > rq->rt.highest_prio.curr)
 		resched_curr(rq);
 #else
 	/* For UP simply resched on drop of prio */
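
The comment shrinkage in prio_changed_rt() follows directly from the conversion: the pull is now deferred, so rq->lock stays held across the whole function and p can no longer migrate midway, making the old "p could migrate" caveat and the rq->curr == p guard unnecessary.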