/*
 * This file contains the scheduler. It implements a basic task-switching
 * routine as well as the schedule() function, which can be called from
 * anywhere and switches to the next task chosen by the scheduler rules. If
 * no task is runnable, the processor simply idles in place until one becomes
 * available, which avoids the need for a dedicated idle task. Each processor
 * has its own scheduler and private run queues, so scheduling decisions need
 * no cross-CPU locking. Tasks keep running on the same processor by default,
 * but migrate when necessary to keep the run queues balanced.
 */
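/*
 * For reference, the Scheduler fields this file relies on look roughly like
 * the sketch below. The actual definition lives in a header (likely
 * nucleus/task.h), so the exact layout and field types are assumptions:
 *
 *     typedef struct Scheduler {
 *         Object     obj;                     object header used by schedulerType
 *         Processor *cpu;                     owning processor
 *         Task      *task;                    task currently on this CPU
 *         List      *queue[PRIORITY_COUNT];   one run queue per priority level
 *         size_t     tasks;                   runnable tasks on this CPU
 *         size_t     timeslice;               ticks left for the running task
 *     } Scheduler;
 */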
#include <stddef.h>	/* size_t */
#include <stdint.h>	/* uintptr_t */
#include <nucleus/cpu.h>
#include <nucleus/kernel.h>
#include <nucleus/task.h>
static void scheduler_new(Object *);
static void scheduler_delete(Object *);
void context_switch(uintptr_t eip, uintptr_t ebx,
                    uintptr_t esi, uintptr_t edi,
                    uintptr_t ebp, uintptr_t esp);
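/*
 * context_switch() is implemented in assembly elsewhere. Judging by how it
 * is called below, it conceptually restores the callee-saved registers and
 * the stack, then jumps to the saved instruction pointer, roughly (a sketch,
 * not the actual routine):
 *
 *     mov  <ebx arg>, %ebx
 *     mov  <esi arg>, %esi
 *     mov  <edi arg>, %edi
 *     mov  <ebp arg>, %ebp
 *     mov  <esp arg>, %esp
 *     jmp  *<eip arg>
 */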
static _Atomic size_t tasks = 0;	/* Runnable tasks, system-wide */
/* Scheduler object type */
ObjectType schedulerType = {
        .name = "SCHEDULER",
        .size = sizeof(Scheduler),
        .new = scheduler_new,
        .delete = scheduler_delete,
};
/* Create a new scheduler object */
static void
scheduler_new(Object *obj)
{
        enum Priority p;
        Scheduler *s = (void *) obj;
        s->cpu = cpu->self;
        s->task = NULL;
        /* The run queues can be lockless because each queue is only ever
         * touched by its owning CPU (see the IPI hand-off in enqueue_task()) */
        for (p = 0; p < PRIORITY_COUNT; p++)
                s->queue[p] = create_list(&taskType, LIST_LOCKLESS);
}
/* Destroy a scheduler object */
static void
scheduler_delete(Object *obj)
{
        enum Priority p;
        Scheduler *s = (void *) obj;
        for (p = 0; p < PRIORITY_COUNT; p++)
                destroy_list(s->queue[p]);
}
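/*
 * Schedulers are presumably instantiated once per processor at bring-up by
 * the object system, which runs scheduler_new() through schedulerType.
 * A sketch, with create_object() as a hypothetical allocator name:
 *
 *     cpu->scheduler = (Scheduler *) create_object(&schedulerType);
 */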
/* Switch to a task */
static void
switch_to_task(Task *task)
{
        /* Save the outgoing task's callee-saved registers, stack, and resume
         * point so a later context_switch() can bring it back in at end: */
        if (__builtin_expect(!!current, 1)) {
                lock(current);
                asm volatile("mov %%esi, %0" : "=r" (current->esi));
                asm volatile("mov %%edi, %0" : "=r" (current->edi));
                asm volatile("mov %%ebx, %0" : "=r" (current->ebx));
                asm volatile("mov %%esp, %0" : "=r" (current->esp));
                asm volatile("mov %%ebp, %0" : "=r" (current->ebp));
                current->eip = (uintptr_t) &&end;
                unlock(current);
                put(current);
        }
        /* Switch to the new task's address space and context. The caller
         * handed us a reference to task, so no get() is needed here */
        switch_to_mm(task->vm);
        current = task;
        set_kernel_stack((uintptr_t) current + KERNEL_STACK_SIZE);
        context_switch(current->eip, current->ebx,
                       current->esi, current->edi,
                       current->ebp, current->esp);
end:
        /* This prevents GCC from optimising the jump to be after the return */
        asm volatile("" ::: "memory");
}
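/*
 * A task that has never run has no saved context, so task creation must seed
 * one for context_switch() to consume. The set_kernel_stack() call above
 * implies the Task structure sits at the base of its own kernel stack. A
 * sketch of the seeding (task_entry and the zeroed registers here are
 * illustrative, not this kernel's actual task-creation code):
 *
 *     task->eip = (uintptr_t) task_entry;
 *     task->esp = (uintptr_t) task + KERNEL_STACK_SIZE;
 *     task->ebp = task->esp;
 *     task->ebx = task->esi = task->edi = 0;
 */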
/*
 * Find the highest-priority queue with a runnable task. Returns 0 when every
 * queue is empty: callers treat 0 as "nothing runnable", so priority 0
 * doubles as the idle sentinel and queue[0] is deliberately never scanned.
 */
static enum Priority
highest_priority_queue(Scheduler *s)
{
        enum Priority p;
        for (p = PRIORITY_COUNT - 1; p > 0; p--) {
                if (count(s->queue[p]))
                        return p;
        }
        return 0;
}
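/*
 * Example: with PRIORITY_COUNT == 4 (an illustrative figure) and tasks
 * queued at priorities 1 and 3, highest_priority_queue() returns 3; with
 * every queue empty it returns 0 and schedule() idles the processor.
 */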
/* Schedule the next task */
void
schedule(void)
{
        enter_critical_section();
        Task *task = current;
        Scheduler *s = cpu->scheduler;
        enum Priority highest = highest_priority_queue(s);
        /* Keep running the current task while it outranks everything queued */
        if (current && current->state == RUNNING
            && (!highest || current->priority > highest))
                goto end;
        /* Idle: halt with interrupts enabled until an interrupt (timer tick
         * or IPI) queues a task. Dropping the counts first stops the load
         * balancer from seeing phantom work on an idle CPU */
        if (!highest) {
                current = NULL;
                if (task) {
                        tasks--;
                        s->tasks--;
                }
                asm volatile("sti");
                while (!(highest = highest_priority_queue(s)))
                        asm volatile("hlt");
                asm volatile("cli");
                if (task) {
                        tasks++;
                        s->tasks++;
                }
                current = task;
        }
        /* Dequeue the next task to run */
        task = pop_from_start(s->queue[highest]);
        task->state = RUNNING;
        if (task == current)
                goto end;
        if (current && current->state == RUNNING) {
                /* The outgoing task is still runnable; put it back */
                current->state = READY;
                add(s->queue[current->priority], current);
        } else if (current) {
                /* The outgoing task blocked or exited; drop it from the counts */
                tasks--;
                s->tasks--;
        }
end:
        /* Refresh the timeslice, scaled by priority */
        s->timeslice = task->priority * 10;
        exit_critical_section();
        if (task != current)
                switch_to_task(task);
}
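/*
 * Nothing in this file decrements s->timeslice; a timer interrupt handler
 * elsewhere is expected to count it down and call schedule() when it runs
 * out (enqueue_task() forces a reschedule by zeroing the slice). A minimal
 * sketch, with timer_tick() as a hypothetical name for that handler:
 *
 *     void
 *     timer_tick(void)
 *     {
 *             Scheduler *s = cpu->scheduler;
 *             if (s->timeslice == 0 || --s->timeslice == 0)
 *                     schedule();
 *     }
 */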
/*
 * Find the scheduler with the fewest tasks. The counters are read without
 * locking, so the answer may be slightly stale; that is acceptable for a
 * placement heuristic.
 */
Scheduler *
least_used_scheduler(void)
{
        Processor *proc;
        Scheduler *best = cpu->scheduler;
        for_each_cpu(proc) {
                if (proc->scheduler->tasks < best->tasks)
                        best = proc->scheduler;
        }
        return best;
}
/* Add a task to a scheduler */
void
enqueue_task(Task *task)
{
        Scheduler *s = task->scheduler;
        if (__builtin_expect(!s, 0))
                s = task->scheduler = least_used_scheduler();
        if (s != cpu->scheduler) {
                /* Run queues belong to their CPU, so hand the task over to
                 * its owner rather than touching the queue remotely */
                send_ipiq(s->cpu->id, (ipiq_func_t) enqueue_task,
                          task, IPIQ_SYNC);
        } else {
                enter_critical_section();
                tasks++;
                s->tasks++;
                add(s->queue[task->priority], task);
                /* Preempt a lower-priority running task at the next tick */
                if (s->task && s->task->priority < task->priority)
                        s->timeslice = 0;
                exit_critical_section();
        }
}
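/*
 * Typical use: a wakeup path marks a blocked task runnable and hands it to
 * the scheduler, which routes it to the right CPU. A sketch (the surrounding
 * wakeup bookkeeping is assumed, not shown in this file):
 *
 *     task->state = READY;
 *     enqueue_task(task);
 */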
/* Balance the schedulers */
void
balance_scheduler(void)
{
        Task *t;
        Scheduler *s = cpu->scheduler;
        size_t max = (tasks / ncpus) + 1;
        if (s->tasks <= max)
                return;
        /* Redistribute tasks from this overloaded processor. Both counters
         * are dropped here because enqueue_task() bumps both again when it
         * re-adds the task on its new scheduler */
        enter_critical_section();
        while (s->tasks > max) {
                t = pop_from_start(s->queue[highest_priority_queue(s)]);
                if (!t)
                        break;  /* s->tasks also counts the running task */
                tasks--;
                s->tasks--;
                t->scheduler = NULL;
                enqueue_task(t);
        }
        exit_critical_section();
}
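/*
 * Example: with tasks == 10 system-wide and ncpus == 4, max == 10/4 + 1 == 3,
 * so a CPU holding more than three tasks sheds queued tasks until it is back
 * down to three. (The figures are illustrative.)
 */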