
Commit f68f19a

Merge pull request #14722 from bergzand/pr/sched/runqueue_clz
sched: Reverse runqueue order when CLZ is available
2 parents: 36ecb40 + ab1d0b6


core/sched.c

Lines changed: 37 additions & 3 deletions
@@ -83,6 +83,40 @@ static void (*sched_cb) (kernel_pid_t active_thread,
                          kernel_pid_t next_thread) = NULL;
 #endif
 
+/* Depending on whether the CLZ instruction is available, the order of the
+ * runqueue_bitcache is reversed. When the instruction is available, it is
+ * faster to determine the MSBit set. When it is not available it is faster to
+ * determine the LSBit set. These functions abstract the runqueue modifications
+ * and readout away, switching between the two orders depending on the CLZ
+ * instruction availability
+ */
+static inline void _set_runqueue_bit(thread_t *process)
+{
+#if defined(BITARITHM_HAS_CLZ)
+    runqueue_bitcache |= BIT31 >> process->priority;
+#else
+    runqueue_bitcache |= 1 << process->priority;
+#endif
+}
+
+static inline void _clear_runqueue_bit(thread_t *process)
+{
+#if defined(BITARITHM_HAS_CLZ)
+    runqueue_bitcache &= ~(BIT31 >> process->priority);
+#else
+    runqueue_bitcache &= ~(1 << process->priority);
+#endif
+}
+
+static inline unsigned _get_prio_queue_from_runqueue(void)
+{
+#if defined(BITARITHM_HAS_CLZ)
+    return 31 - bitarithm_msb(runqueue_bitcache);
+#else
+    return bitarithm_lsb(runqueue_bitcache);
+#endif
+}
+
 static void _unschedule(thread_t *active_thread)
 {
     if (active_thread->status == STATUS_RUNNING) {
@@ -123,7 +157,7 @@ int __attribute__((used)) sched_run(void)
 
     sched_context_switch_request = 0;
 
-    int nextrq = bitarithm_lsb(runqueue_bitcache);
+    unsigned nextrq = _get_prio_queue_from_runqueue();
     thread_t *next_thread = container_of(sched_runqueues[nextrq].next->next,
                                          thread_t, rq_entry);
 
@@ -178,7 +212,7 @@ void sched_set_status(thread_t *process, thread_status_t status)
                   process->pid, process->priority);
            clist_rpush(&sched_runqueues[process->priority],
                        &(process->rq_entry));
-            runqueue_bitcache |= 1 << process->priority;
+            _set_runqueue_bit(process);
        }
    }
    else {
@@ -189,7 +223,7 @@ void sched_set_status(thread_t *process, thread_status_t status)
            clist_lpop(&sched_runqueues[process->priority]);
 
            if (!sched_runqueues[process->priority].next) {
-                runqueue_bitcache &= ~(1 << process->priority);
+                _clear_runqueue_bit(process);
            }
        }
    }
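The reasoning in the new comment block can be illustrated with a small standalone sketch (not RIOT code): when priority 0 is mapped to bit 31, a single count-leading-zeros operation, here GCC/Clang's __builtin_clz(), yields the highest-priority non-empty runqueue directly, whereas the LSB-ordered layout is cheaper on cores that lack CLZ. The bitcache variable and the helper names below are illustrative stand-ins rather than RIOT identifiers, and BIT31 is defined locally.

#include <stdint.h>
#include <stdio.h>

#define BIT31 (1ul << 31)

/* One bit per non-empty priority queue; priority 0 is the most urgent. */
static uint32_t bitcache;

/* CLZ order: priority p occupies bit (31 - p), so the most urgent non-empty
 * queue is always the most significant set bit. */
static void set_bit(unsigned prio)
{
    bitcache |= BIT31 >> prio;
}

static void clear_bit(unsigned prio)
{
    bitcache &= ~(BIT31 >> prio);
}

/* Counting leading zeros of the MSB-ordered cache recovers the priority in
 * one step; must not be called while bitcache is 0. */
static unsigned highest_prio(void)
{
    return (unsigned)__builtin_clz(bitcache);
}

int main(void)
{
    set_bit(7);                                       /* a priority-7 thread becomes runnable */
    set_bit(2);                                       /* a more urgent priority-2 thread appears */
    printf("next runqueue: %u\n", highest_prio());    /* prints 2 */
    clear_bit(2);
    printf("next runqueue: %u\n", highest_prio());    /* prints 7 */
    return 0;
}

On Cortex-M3/M4-class targets __builtin_clz() typically compiles to a single CLZ instruction, which is why the change reverses the bit order only when BITARITHM_HAS_CLZ is defined and keeps the LSB-ordered layout with bitarithm_lsb() everywhere else.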
