Skip to content

Commit 468cb6e

Browse files
authored
Merge pull request torvalds#257 from liuyuan10/dirq
lkl: Direct irq and fix direct syscall degradation
2 parents 3e18acf + 3b62e66 commit 468cb6e

File tree

6 files changed

+106
-31
lines changed

6 files changed

+106
-31
lines changed

arch/lkl/include/asm/cpu.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,11 @@ int lkl_cpu_try_run_irq(int irq);
77
int lkl_cpu_init(void);
88
void lkl_cpu_shutdown(void);
99
void lkl_cpu_wait_shutdown(void);
10-
void lkl_cpu_wakeup(void);
10+
void lkl_cpu_wakeup_idle(void);
1111
void lkl_cpu_change_owner(lkl_thread_t owner);
1212
void lkl_cpu_set_irqs_pending(void);
13+
void lkl_idle_tail_schedule(void);
14+
int lkl_cpu_idle_pending(void);
15+
extern void cpu_idle_loop(void);
1316

1417
#endif /* _ASM_LKL_CPU_H */

arch/lkl/include/asm/thread_info.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ void threads_cleanup(void);
6060
#define TIF_SCHED_JB 7
6161
#define TIF_SCHED_EXIT 8
6262
#define TIF_HOST_THREAD 9
63+
#define TIF_IDLE 10
6364

6465
static inline void set_ti_thread_flag(struct thread_info *ti, int flag);
6566

arch/lkl/kernel/cpu.c

Lines changed: 82 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
1+
#include <linux/cpu.h>
2+
#include <linux/cpuidle.h>
13
#include <linux/kernel.h>
24
#include <linux/sched.h>
5+
#include <linux/tick.h>
36
#include <asm/host_ops.h>
47
#include <asm/cpu.h>
58
#include <asm/thread_info.h>
@@ -50,6 +53,10 @@ struct lkl_cpu {
5053
struct lkl_sem *sem;
5154
/* semaphore for the idle thread */
5255
struct lkl_sem *idle_sem;
56+
/* if the idle thread is pending */
57+
bool idle_pending;
58+
/* jmp_buf used for idle thread to restart */
59+
struct lkl_jmp_buf idle_jb;
5360
/* semaphore used for shutdown */
5461
struct lkl_sem *shutdown_sem;
5562
} cpu;
@@ -126,18 +133,19 @@ void lkl_cpu_put(void)
126133
lkl_ops->mutex_lock(cpu.lock);
127134
}
128135

129-
if (need_resched()) {
136+
if (need_resched() && cpu.count == 1) {
137+
if (in_interrupt())
138+
lkl_bug("%s: in interrupt\n", __func__);
139+
lkl_ops->mutex_unlock(cpu.lock);
130140
if (test_thread_flag(TIF_HOST_THREAD)) {
131-
if (cpu.count == 1 && !in_interrupt()) {
132-
lkl_ops->mutex_unlock(cpu.lock);
133-
set_current_state(TASK_UNINTERRUPTIBLE);
134-
if (!thread_set_sched_jmp())
135-
schedule();
136-
return;
137-
}
141+
set_current_state(TASK_UNINTERRUPTIBLE);
142+
if (!thread_set_sched_jmp())
143+
schedule();
138144
} else {
139-
lkl_cpu_wakeup();
145+
if (!thread_set_sched_jmp())
146+
lkl_idle_tail_schedule();
140147
}
148+
return;
141149
}
142150

143151
if (--cpu.count > 0) {
@@ -210,20 +218,37 @@ void arch_cpu_idle(void)
210218

211219
lkl_ops->thread_exit();
212220
}
213-
214221
/* enable irqs now to allow direct irqs to run */
215222
local_irq_enable();
216223

224+
if (need_resched())
225+
return;
226+
227+
cpu.idle_pending = true;
217228
lkl_cpu_put();
218229

219230
lkl_ops->sem_down(cpu.idle_sem);
220231

221-
lkl_cpu_get();
232+
cpu.idle_pending = false;
233+
/* to match that of schedule_preempt_disabled() */
234+
preempt_disable();
235+
lkl_ops->jmp_buf_longjmp(&cpu.idle_jb, 1);
236+
}
222237

223-
run_irqs();
238+
void arch_cpu_idle_prepare(void)
239+
{
240+
set_ti_thread_flag(current_thread_info(), TIF_IDLE);
241+
/*
242+
* We hijack the idle loop here so that we can let the idle thread
243+
* jump back to the beginning.
244+
*/
245+
while (1) {
246+
if (!lkl_ops->jmp_buf_set(&cpu.idle_jb))
247+
cpu_idle_loop();
248+
}
224249
}
225250

226-
void lkl_cpu_wakeup(void)
251+
void lkl_cpu_wakeup_idle(void)
227252
{
228253
lkl_ops->sem_up(cpu.idle_sem);
229254
}
@@ -242,3 +267,47 @@ int lkl_cpu_init(void)
242267

243268
return 0;
244269
}
270+
271+
/*
272+
* Simulate the exit path of idle loop so that we can schedule when LKL is
273+
* in idle.
274+
* It's just a duplication of those in idle.c so a better way is to refactor
275+
* idle.c to expose such function.
276+
*/
277+
void lkl_idle_tail_schedule(void)
278+
{
279+
280+
if (!cpu.idle_pending ||
281+
!test_bit(TIF_IDLE, &current_thread_info()->flags))
282+
lkl_bug("%s: not in idle\n", __func__);
283+
284+
start_critical_timings();
285+
__current_set_polling();
286+
287+
if (WARN_ON_ONCE(irqs_disabled()))
288+
local_irq_enable();
289+
290+
rcu_idle_exit();
291+
arch_cpu_idle_exit();
292+
preempt_set_need_resched();
293+
tick_nohz_idle_exit();
294+
__current_clr_polling();
295+
296+
/*
297+
* memory barrier copied from idle.c
298+
*/
299+
smp_mb__after_atomic();
300+
301+
/*
302+
* Didn't find a way to include kernel/sched/sched.h for
303+
* sched_ttwu_pending().
304+
* Anyway, it's a no-op when not CONFIG_SMP.
305+
*/
306+
307+
schedule_preempt_disabled();
308+
}
309+
310+
int lkl_cpu_idle_pending(void)
311+
{
312+
return cpu.idle_pending;
313+
}

arch/lkl/kernel/syscalls.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -93,15 +93,12 @@ static unsigned int task_key;
9393
long lkl_syscall(long no, long *params)
9494
{
9595
struct task_struct *task = host0;
96-
static int count;
9796
long ret;
9897

9998
ret = lkl_cpu_get();
10099
if (ret < 0)
101100
return ret;
102101

103-
count++;
104-
105102
if (lkl_ops->tls_get) {
106103
task = lkl_ops->tls_get(task_key);
107104
if (!task) {
@@ -116,16 +113,7 @@ long lkl_syscall(long no, long *params)
116113

117114
ret = run_syscall(no, params);
118115

119-
if (count > 1) {
120-
set_current_state(TASK_UNINTERRUPTIBLE);
121-
if (!thread_set_sched_jmp())
122-
schedule();
123-
count--;
124-
return ret;
125-
}
126-
127116
out:
128-
count--;
129117
lkl_cpu_put();
130118

131119
return ret;

arch/lkl/kernel/threads.c

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -90,17 +90,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
9090
struct thread_info *_prev = task_thread_info(prev);
9191
struct thread_info *_next = task_thread_info(next);
9292
unsigned long _prev_flags = _prev->flags;
93+
bool wakeup_idle = test_bit(TIF_IDLE, &_next->flags) &&
94+
lkl_cpu_idle_pending();
9395

9496
_current_thread_info = task_thread_info(next);
9597
_next->prev_sched = prev;
9698
abs_prev = prev;
9799

98100
BUG_ON(!_next->tid);
99-
lkl_cpu_change_owner(_next->tid);
100101

101-
lkl_ops->sem_up(_next->sched_sem);
102102
if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
103+
/* Atomic. Must be done before wakeup next */
103104
clear_ti_thread_flag(_prev, TIF_SCHED_JB);
105+
}
106+
if (wakeup_idle)
107+
schedule_tail(abs_prev);
108+
lkl_cpu_change_owner(_next->tid);
109+
110+
/* No kernel code is allowed after wakeup next */
111+
if (wakeup_idle)
112+
lkl_cpu_wakeup_idle();
113+
else
114+
lkl_ops->sem_up(_next->sched_sem);
115+
116+
if (test_bit(TIF_SCHED_JB, &_prev_flags)) {
104117
lkl_ops->jmp_buf_longjmp(&_prev->sched_jb, 1);
105118
} else if (test_bit(TIF_SCHED_EXIT, &_prev_flags)) {
106119
lkl_ops->thread_exit();
@@ -132,8 +145,8 @@ void switch_to_host_task(struct task_struct *task)
132145
if (!thread_set_sched_jmp())
133146
schedule();
134147
} else {
135-
lkl_cpu_wakeup();
136-
lkl_cpu_put();
148+
if (!thread_set_sched_jmp())
149+
lkl_idle_tail_schedule();
137150
}
138151

139152
lkl_ops->sem_down(task_thread_info(task)->sched_sem);

kernel/sched/idle.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ static void cpuidle_idle_call(void)
199199
*
200200
* Called with polling cleared.
201201
*/
202-
static void cpu_idle_loop(void)
202+
void cpu_idle_loop(void)
203203
{
204204
int cpu = smp_processor_id();
205205

@@ -270,6 +270,7 @@ static void cpu_idle_loop(void)
270270
schedule_preempt_disabled();
271271
}
272272
}
273+
EXPORT_SYMBOL(cpu_idle_loop);
273274

274275
void cpu_startup_entry(enum cpuhp_state state)
275276
{

0 commit comments

Comments
 (0)