@@ -1,5 +1,8 @@
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/tick.h>
 #include <asm/host_ops.h>
 #include <asm/cpu.h>
 #include <asm/thread_info.h>
@@ -50,6 +53,10 @@ struct lkl_cpu {
 	struct lkl_sem *sem;
 	/* semaphore for the idle thread */
 	struct lkl_sem *idle_sem;
+	/* set while the idle thread is parked on idle_sem */
+	bool idle_pending;
+	/* jmp_buf used by the idle thread to restart its loop */
+	struct lkl_jmp_buf idle_jb;
 	/* semaphore used for shutdown */
 	struct lkl_sem *shutdown_sem;
 } cpu;
@@ -126,18 +133,19 @@ void lkl_cpu_put(void)
 		lkl_ops->mutex_lock(cpu.lock);
 	}
 
-	if (need_resched()) {
+	if (need_resched() && cpu.count == 1) {
+		if (in_interrupt())
+			lkl_bug("%s: in interrupt\n", __func__);
+		lkl_ops->mutex_unlock(cpu.lock);
 		if (test_thread_flag(TIF_HOST_THREAD)) {
-			if (cpu.count == 1 && !in_interrupt()) {
-				lkl_ops->mutex_unlock(cpu.lock);
-				set_current_state(TASK_UNINTERRUPTIBLE);
-				if (!thread_set_sched_jmp())
-					schedule();
-				return;
-			}
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!thread_set_sched_jmp())
+				schedule();
 		} else {
-			lkl_cpu_wakeup();
+			if (!thread_set_sched_jmp())
+				lkl_idle_tail_schedule();
 		}
+		return;
 	}
 
 	if (--cpu.count > 0) {
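
The rewritten path above hinges on thread_set_sched_jmp(): the current context is saved before entering the scheduler, and control comes back by jumping to the saved context rather than by unwinding the call stack. As a rough illustration of that save-then-jump pattern, here is a minimal user-space sketch using plain setjmp/longjmp; fake_schedule() is a hypothetical stand-in, not LKL's scheduler:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf sched_jb;

/* Hypothetical stand-in for the scheduler: instead of returning through
 * the frames below it, it discards them with a longjmp. */
static void fake_schedule(void)
{
	/* ...switch to another task; when this task is picked again... */
	longjmp(sched_jb, 1);
}

int main(void)
{
	/* The pattern behind "if (!thread_set_sched_jmp()) schedule();":
	 * setjmp() returns 0 when the context is saved (direct path into
	 * the scheduler) and non-zero when execution resumes here. */
	if (!setjmp(sched_jb))
		fake_schedule();	/* does not return normally */

	puts("resumed via longjmp");
	return 0;
}

A zero return selects the direct path into the scheduler; a non-zero return means the thread was switched back in, which is exactly how both the host-thread and idle-thread branches in the hunk above are structured.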
@@ -210,20 +218,37 @@ void arch_cpu_idle(void)
 
 		lkl_ops->thread_exit();
 	}
-
 	/* enable irqs now to allow direct irqs to run */
 	local_irq_enable();
 
+	if (need_resched())
+		return;
+
+	cpu.idle_pending = true;
 	lkl_cpu_put();
 
 	lkl_ops->sem_down(cpu.idle_sem);
 
-	lkl_cpu_get();
+	cpu.idle_pending = false;
+	/* match the preempt count expected by schedule_preempt_disabled() */
+	preempt_disable();
+	lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
+}
 
-	run_irqs();
+void arch_cpu_idle_prepare(void)
+{
+	set_ti_thread_flag(current_thread_info(), TIF_IDLE);
+	/*
+	 * We hijack the idle loop here so that we can let the idle thread
+	 * jump back to the beginning.
+	 */
+	while (1) {
+		if (!lkl_ops->jmp_buf_set(&cpu.idle_jb))
+			cpu_idle_loop();
+	}
 }
 
-void lkl_cpu_wakeup(void)
+void lkl_cpu_wakeup_idle(void)
 {
 	lkl_ops->sem_up(cpu.idle_sem);
 }
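
arch_cpu_idle_prepare() arms cpu.idle_jb around cpu_idle_loop() so that arch_cpu_idle(), once woken through idle_sem, can discard the idle loop's stale stack and restart it from the top. Below is a minimal sketch of that restartable-loop pattern, using plain setjmp/longjmp in place of the lkl_ops jmp_buf host operations; idle_body(), wakeups, and the three-iteration exit condition are illustrative only (the real loop never exits):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf idle_jb;
static int wakeups;

/* Stand-in for arch_cpu_idle(): after "waking up", unwind the idle
 * loop's stack by jumping back to the prepare loop below. */
static void idle_body(void)
{
	/* ...a real idle thread would block on a semaphore here... */
	wakeups++;
	longjmp(idle_jb, 1);	/* restart the idle loop on a clean stack */
}

static void idle_loop(void)
{
	for (;;)
		idle_body();
}

int main(void)
{
	/* Mirrors arch_cpu_idle_prepare(): setjmp() returns 0 on entry,
	 * so the loop body runs; each longjmp() lands back here with a
	 * non-zero value and the while re-arms the jump buffer. */
	while (wakeups < 3) {
		if (!setjmp(idle_jb))
			idle_loop();
	}
	printf("idle loop restarted %d times\n", wakeups);
	return 0;
}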
@@ -242,3 +267,47 @@ int lkl_cpu_init(void)
 
 	return 0;
 }
+
+/*
+ * Simulate the exit path of the idle loop so that we can schedule while
+ * LKL is idle.
+ * This duplicates code from idle.c; a better approach would be to
+ * refactor idle.c to expose such a function.
+ */
+void lkl_idle_tail_schedule(void)
+{
+
+	if (!cpu.idle_pending ||
+	    !test_bit(TIF_IDLE, &current_thread_info()->flags))
+		lkl_bug("%s: not in idle\n", __func__);
+
+	start_critical_timings();
+	__current_set_polling();
+
+	if (WARN_ON_ONCE(irqs_disabled()))
+		local_irq_enable();
+
+	rcu_idle_exit();
+	arch_cpu_idle_exit();
+	preempt_set_need_resched();
+	tick_nohz_idle_exit();
+	__current_clr_polling();
+
+	/*
+	 * Memory barrier copied from idle.c.
+	 */
+	smp_mb__after_atomic();
+
+	/*
+	 * There is no clean way to include kernel/sched/sched.h for
+	 * sched_ttwu_pending() here; in any case, it is a no-op when
+	 * CONFIG_SMP is not set, so skipping it is safe.
+	 */
+
+	schedule_preempt_disabled();
+}
+
+int lkl_cpu_idle_pending(void)
+{
+	return cpu.idle_pending;
+}
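
lkl_cpu_idle_pending() exposes the idle_pending flag so callers can tell whether the idle host thread is parked on idle_sem and needs lkl_cpu_wakeup_idle(). A user-space sketch of that park/wake pairing with POSIX semaphores follows; names such as idle_thread() are hypothetical, and in LKL the flag is consulted under cpu.lock rather than being a volatile global:

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t idle_sem;
static volatile bool idle_pending;	/* analogue of cpu.idle_pending */

/* Analogue of the idle thread parking in arch_cpu_idle(). */
static void *idle_thread(void *arg)
{
	(void)arg;
	idle_pending = true;	/* "parked; wake me through the semaphore" */
	sem_wait(&idle_sem);	/* like lkl_ops->sem_down(cpu.idle_sem) */
	idle_pending = false;
	puts("idle thread woken");
	return NULL;
}

int main(void)
{
	pthread_t tid;

	sem_init(&idle_sem, 0, 0);
	pthread_create(&tid, NULL, idle_thread, NULL);

	/* Waker side, like lkl_cpu_wakeup_idle(): posting is safe even if
	 * it races with the park, since the semaphore count simply stays
	 * at 1 until the idle thread calls sem_wait(). A caller could
	 * consult the pending flag first to skip needless wakeups. */
	sem_post(&idle_sem);

	pthread_join(tid, NULL);
	sem_destroy(&idle_sem);
	return 0;
}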