Skip to content

Commit 1a0b89b

Browse files
authored
Merge pull request #174 from f9micro/prio-sched
Implement priority bitmap scheduler
2 parents ea27381 + ab3bba5 commit 1a0b89b

File tree

13 files changed

+751
-165
lines changed

13 files changed

+751
-165
lines changed

include/sched.h

Lines changed: 69 additions & 19 deletions
Original file line number · Diff line number · Diff line change
@@ -6,33 +6,83 @@
66
#ifndef SCHED_H_
77
#define SCHED_H_
88

9-
#include <thread.h>
9+
#include <types.h>
1010

11-
typedef enum {
12-
SSI_SOFTIRQ, /* Kernel thread */
13-
SSI_INTR_THREAD,
14-
SSI_ROOT_THREAD,
15-
SSI_IPC_THREAD,
16-
SSI_NORMAL_THREAD,
17-
SSI_IDLE,
11+
/* Forward declaration - full definition in thread.h */
12+
struct tcb;
1813

19-
NUM_SCHED_SLOTS
20-
} sched_slot_id_t;
14+
/**
15+
* @file sched.h
16+
* @brief Priority Bitmap Scheduler
17+
*
18+
* 32-level priority scheduler with O(1) highest-priority selection.
19+
* Uses Cortex-M CLZ instruction for efficient bitmap scanning.
20+
*
21+
* Priority 0 is highest, priority 31 is lowest (idle).
22+
* Multiple threads at same priority use round-robin scheduling.
23+
*/
24+
25+
/* Number of priority levels (0 = highest, 31 = lowest) */
26+
#define SCHED_PRIORITY_LEVELS 32
2127

22-
struct sched_slot;
28+
/* Priority assignments for system threads */
29+
#define SCHED_PRIO_SOFTIRQ 0 /* Kernel softirq thread */
30+
#define SCHED_PRIO_INTR 1 /* Interrupt handler threads */
31+
#define SCHED_PRIO_ROOT 2 /* Root thread */
32+
#define SCHED_PRIO_IPC 3 /* IPC fast path */
33+
#define SCHED_PRIO_NORMAL_MIN 4 /* Normal threads start here */
34+
#define SCHED_PRIO_NORMAL_MAX 30 /* Normal threads end here */
35+
#define SCHED_PRIO_IDLE 31 /* Idle thread (always lowest) */
2336

24-
typedef tcb_t *(*sched_handler_t)(struct sched_slot *slot);
37+
/* Default priority for user threads */
38+
#define SCHED_PRIO_DEFAULT 16
2539

26-
typedef struct sched_slot {
27-
tcb_t *ss_scheduled;
28-
sched_handler_t ss_handler;
29-
} sched_slot_t;
40+
/**
41+
* Linked list node for ready queue.
42+
* Embedded in TCB for zero-allocation enqueueing.
43+
*/
44+
typedef struct sched_link {
45+
struct sched_link *prev, *next;
46+
} sched_link_t;
3047

48+
/* Scheduler initialization */
3149
void sched_init(void);
3250

33-
tcb_t* schedule_select(void);
51+
/* Core scheduling functions */
52+
struct tcb *schedule_select(void);
3453
int schedule(void);
35-
void sched_slot_dispatch(sched_slot_id_t slot_id, tcb_t *thread);
36-
void sched_slot_set_handler(sched_slot_id_t slot_id, sched_handler_t handler);
54+
55+
/**
56+
* Enqueue thread to ready queue at its priority level.
57+
* Thread must have valid priority set.
58+
*/
59+
void sched_enqueue(struct tcb *thread);
60+
61+
/**
62+
* Dequeue thread from ready queue.
63+
* Called when thread blocks or is destroyed.
64+
*/
65+
void sched_dequeue(struct tcb *thread);
66+
67+
/**
68+
* Check if thread is currently in a ready queue.
69+
*/
70+
int sched_is_queued(struct tcb *thread);
71+
72+
/**
73+
* Yield current thread's timeslice.
74+
* Rotates thread to back of its priority queue for round-robin.
75+
*/
76+
void sched_yield(void);
77+
78+
/**
79+
* Change thread priority safely.
80+
* Handles queue migration atomically if thread is queued.
81+
* Use this instead of directly modifying thread->priority.
82+
*
83+
* @param thread Thread to modify
84+
* @param new_prio New priority level (0 = highest, 31 = lowest)
85+
*/
86+
void sched_set_priority(struct tcb *thread, uint8_t new_prio);
3787

3888
#endif /* SCHED_H_ */

include/thread.h

Lines changed: 16 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -85,13 +85,22 @@ typedef struct {
8585
* Contains pointers to thread's UTCB (User TCB) and address space
8686
*/
8787
struct tcb {
88-
l4_thread_t t_globalid;
89-
l4_thread_t t_localid;
90-
91-
thread_state_t state;
92-
93-
memptr_t stack_base;
94-
size_t stack_size;
88+
/* Hot scheduler fields - Cache Line 0 */
89+
struct {
90+
struct tcb *prev, *next;
91+
} sched_link; /* 8 bytes (0-7) */
92+
93+
thread_state_t state; /* 4 bytes (8-11) */
94+
uint8_t priority; /* 1 byte (12) - effective priority */
95+
uint8_t base_priority; /* 1 byte (13) - natural priority */
96+
uint8_t _sched_pad[2]; /* 2 bytes (14-15) - Alignment */
97+
98+
l4_thread_t t_globalid; /* 4 bytes (16-19) */
99+
l4_thread_t t_localid; /* 4 bytes (20-23) */
100+
101+
memptr_t stack_base; /* 4 bytes (24-27) */
102+
size_t stack_size; /* 4 bytes (28-31) */
103+
/* End of Cache Line 0 (32 bytes) */
95104

96105
context_t ctx;
97106

kernel/interrupt.c

Lines changed: 7 additions & 31 deletions
Original file line number · Diff line number · Diff line change
@@ -1,5 +1,6 @@
11
#include <thread.h>
22
#include <ipc.h>
3+
#include <sched.h>
34
#include <platform/irq.h>
45
#include <interrupt.h>
56
#include <interrupt_ipc.h>
@@ -72,18 +73,6 @@ static void user_irq_queue_push(struct user_irq *uirq)
7273
}
7374
}
7475

75-
static struct user_irq *user_irq_queue_pop(void)
76-
{
77-
if (user_irq_queue_is_empty())
78-
return NULL;
79-
80-
struct user_irq *uirq = user_irq_queue.head;
81-
user_irq_queue.head = uirq->next;
82-
uirq->next = NULL;
83-
84-
return uirq;
85-
}
86-
8776
static void user_irq_queue_delete(int irq)
8877
{
8978
struct user_irq *uirq = user_irqs[irq];
@@ -173,6 +162,11 @@ static int irq_handler_enable(int irq)
173162

174163
irq_handler_ipc(uirq);
175164

165+
/* Wake up the interrupt thread directly */
166+
thr->priority = SCHED_PRIO_INTR;
167+
thr->state = T_RUNNABLE;
168+
sched_enqueue(thr);
169+
176170
return 0;
177171
}
178172

@@ -191,24 +185,6 @@ static void irq_schedule(int irq)
191185
irq_handler_enable(irq);
192186
}
193187

194-
static tcb_t *irq_handler_sched(struct sched_slot *slot)
195-
{
196-
tcb_t *thr = NULL;
197-
198-
irq_disable();
199-
struct user_irq *uirq = user_irq_queue_pop();
200-
201-
if (uirq && (thr = uirq->thr) &&
202-
thr->state == T_RECV_BLOCKED) {
203-
thr->state = T_RUNNABLE;
204-
sched_slot_dispatch(SSI_INTR_THREAD, thr);
205-
}
206-
207-
irq_enable();
208-
209-
return thr;
210-
}
211-
212188
void __interrupt_handler(int irq)
213189
{
214190
struct user_irq *uirq = user_irq_fetch(irq);
@@ -227,7 +203,6 @@ void __interrupt_handler(int irq)
227203
void interrupt_init(void)
228204
{
229205
user_irq_reset_all();
230-
sched_slot_set_handler(SSI_INTR_THREAD, irq_handler_sched);
231206
}
232207

233208
INIT_HOOK(interrupt_init, INIT_LEVEL_KERNEL_EARLY);
@@ -300,6 +275,7 @@ void user_interrupt_handler_update(tcb_t *thr)
300275
/* reply ipc immediately */
301276
irq_handler_ipc(uirq);
302277
thr->state = T_RUNNABLE;
278+
sched_enqueue(thr);
303279
break;
304280
}
305281
break;

kernel/ipc.c

Lines changed: 36 additions & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -23,6 +23,16 @@ extern tcb_t *caller;
2323
extern tcb_t *thread_map[];
2424
extern int thread_count;
2525

26+
/**
27+
* Make thread runnable and enqueue to scheduler.
28+
* Used after IPC operations that unblock threads.
29+
*/
30+
static inline void thread_make_runnable(tcb_t *thr)
31+
{
32+
thr->state = T_RUNNABLE;
33+
sched_enqueue(thr);
34+
}
35+
2636
uint32_t ipc_read_mr(tcb_t *from, int i)
2737
{
2838
if (i >= 8)
@@ -179,25 +189,26 @@ static void do_ipc(tcb_t *from, tcb_t *to)
179189

180190
to->utcb->sender = from->t_globalid;
181191

182-
to->state = T_RUNNABLE;
192+
/* Temporarily boost receiver priority for IPC fast path.
193+
* base_priority is preserved; effective priority restored
194+
* when thread is descheduled (in thread_switch).
195+
*/
196+
sched_set_priority(to, SCHED_PRIO_IPC);
197+
thread_make_runnable(to);
183198
to->ipc_from = L4_NILTHREAD;
184199
((uint32_t *) to->ctx.sp)[REG_R0] = from->t_globalid;
185200

186201
/* If from has receive phases, lock myself */
187202
from_recv_tid = ((uint32_t *) from->ctx.sp)[REG_R1];
188203
if (from_recv_tid == L4_NILTHREAD) {
189-
from->state = T_RUNNABLE;
204+
thread_make_runnable(from);
190205
} else {
191206
from->state = T_RECV_BLOCKED;
192207
from->ipc_from = from_recv_tid;
193208

194209
dbg_printf(DL_IPC, "IPC: %t receiving\n", from->t_globalid);
195210
}
196211

197-
/* Dispatch communicating threads */
198-
sched_slot_dispatch(SSI_NORMAL_THREAD, from);
199-
sched_slot_dispatch(SSI_IPC_THREAD, to);
200-
201212
dbg_printf(DL_IPC,
202213
"IPC: %t→%t done\n", from->t_globalid, to->t_globalid);
203214
}
@@ -207,6 +218,9 @@ uint32_t ipc_timeout(void *data)
207218
ktimer_event_t *event = (ktimer_event_t *) data;
208219
tcb_t *thr = (tcb_t *) event->data;
209220

221+
dbg_printf(DL_KDB, "IPC: timeout tid=%t st=%d\n",
222+
thr->t_globalid, thr->state);
223+
210224
if (thr->timeout_event == (uint32_t)data) {
211225

212226
if (thr->state == T_RECV_BLOCKED)
@@ -215,7 +229,7 @@ uint32_t ipc_timeout(void *data)
215229
if (thr->state == T_SEND_BLOCKED)
216230
user_ipc_error(thr, UE_IPC_TIMEOUT | UE_IPC_PHASE_SEND);
217231

218-
thr->state = T_RUNNABLE;
232+
thread_make_runnable(thr);
219233
thr->timeout_event = 0;
220234
}
221235

@@ -233,6 +247,9 @@ static void sys_ipc_timeout(uint32_t timeout)
233247

234248
kevent = ktimer_event_create(ticks, ipc_timeout, caller);
235249

250+
dbg_printf(DL_KDB, "IPC: sched timeout ticks=%d ev=%p\n",
251+
ticks, kevent);
252+
236253
caller->timeout_event = (uint32_t) kevent;
237254
}
238255

@@ -247,6 +264,8 @@ void sys_ipc(uint32_t *param1)
247264

248265
if (to_tid == L4_NILTHREAD &&
249266
from_tid == L4_NILTHREAD) {
267+
dbg_printf(DL_KDB, "IPC: sleep tid=%t timeout=%p\n",
268+
caller->t_globalid, timeout);
250269
caller->state = T_INACTIVE;
251270
if (timeout)
252271
sys_ipc_timeout(timeout);
@@ -258,11 +277,11 @@ void sys_ipc(uint32_t *param1)
258277

259278
if (to_tid == TID_TO_GLOBALID(THREAD_LOG)) {
260279
user_log(caller);
261-
caller->state = T_RUNNABLE;
280+
thread_make_runnable(caller);
262281
return;
263282
} else if (to_tid == TID_TO_GLOBALID(THREAD_IRQ_REQUEST)) {
264283
user_interrupt_config(caller);
265-
caller->state = T_RUNNABLE;
284+
thread_make_runnable(caller);
266285
return;
267286
} else if (to_thr &&
268287
(to_thr->state == T_RECV_BLOCKED ||
@@ -297,7 +316,7 @@ void sys_ipc(uint32_t *param1)
297316
to_tid, sp - stack_size, mp ? mp->name : "N/A");
298317
user_ipc_error(caller,
299318
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
300-
caller->state = T_RUNNABLE;
319+
thread_make_runnable(caller);
301320
return;
302321
}
303322

@@ -317,10 +336,10 @@ void sys_ipc(uint32_t *param1)
317336
(void *) ipc_read_mr(caller, 1),
318337
regs, to_thr);
319338

320-
caller->state = T_RUNNABLE;
339+
thread_make_runnable(caller);
321340

322341
/* Start thread */
323-
to_thr->state = T_RUNNABLE;
342+
thread_make_runnable(to_thr);
324343

325344
return;
326345
} else {
@@ -340,7 +359,7 @@ void sys_ipc(uint32_t *param1)
340359
if (typed_last > IPC_MR_COUNT) {
341360
user_ipc_error(caller,
342361
UE_IPC_MSG_OVERFLOW | UE_IPC_PHASE_SEND);
343-
caller->state = T_RUNNABLE;
362+
thread_make_runnable(caller);
344363
return;
345364
}
346365

@@ -365,7 +384,7 @@ void sys_ipc(uint32_t *param1)
365384
dbg_printf(DL_IPC,
366385
"IPC: REJECT unaligned map to INACTIVE %p\n",
367386
map_base);
368-
caller->state = T_RUNNABLE;
387+
thread_make_runnable(caller);
369388
return;
370389
}
371390

@@ -385,7 +404,7 @@ void sys_ipc(uint32_t *param1)
385404
}
386405

387406
/* Keep thread INACTIVE, sender continues */
388-
caller->state = T_RUNNABLE;
407+
thread_make_runnable(caller);
389408
return;
390409
}
391410
} else {
@@ -400,7 +419,7 @@ void sys_ipc(uint32_t *param1)
400419
caller->t_globalid, to_tid);
401420
user_ipc_error(caller,
402421
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
403-
caller->state = T_RUNNABLE;
422+
thread_make_runnable(caller);
404423
return;
405424
}
406425

@@ -414,7 +433,7 @@ void sys_ipc(uint32_t *param1)
414433
caller->t_globalid, to_tid);
415434
user_ipc_error(caller,
416435
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
417-
caller->state = T_RUNNABLE;
436+
thread_make_runnable(caller);
418437
return;
419438
}
420439

0 commit comments

Comments (0)