Nucleus
commit bb0cb77: Per-CPU Scheduler (Barry, 3 years, 2 months ago)
diff --git a/task/scheduler.c b/task/scheduler.c
index 02413bd..d83fe0c 100644
--- a/task/scheduler.c
+++ b/task/scheduler.c
@@ -3,21 +3,52 @@
* routine, as well as the schedule() function. The scheduler can be called
* from anywhere, and will switch to the next task decided by the scheduler
* rules. If it cannot find a task to schedule, it just idles until one becomes
- * available. This avoids the need for an idle task.
+ * available. This avoids the need for an idle task. Each processor has its
+ * own scheduler and its own ready queues, so scheduling decisions stay local
+ * to that CPU. Tasks normally keep running on the same processor, but will
+ * migrate when necessary to keep the run queues balanced.
*/
+#include <nucleus/cpu.h>
#include <nucleus/kernel.h>
#include <nucleus/task.h>
-#define PRIORITY_COUNT 6
-
+static void scheduler_new(Object *);
+static void scheduler_delete(Object *);
void context_switch(uintptr_t eip, page_dir_t pagedir,
uintptr_t esi, uintptr_t edi, uintptr_t ebx,
uintptr_t ebp, uintptr_t esp);
-extern uint8_t slice[];
+static _Atomic size_t tasks = 0;
+
+/* Scheduler object type */
+ObjectType schedulerType = {
+ .name = "SCHEDULER",
+ .size = sizeof(Scheduler),
+ .new = scheduler_new,
+ .delete = scheduler_delete,
+};
-ObjectList *readyQueue[PRIORITY_COUNT];
+/* Create a new scheduler object */
+static void
+scheduler_new(Object *obj)
+{
+ enum Priority p;
+ Scheduler *s = (void *) obj;
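+ /* The scheduler is owned by the processor that creates it */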
+ s->cpu = cpu->self;
+ s->task = NULL;
+ for (p = 0; p < PRIORITY_COUNT; p++)
+ s->queue[p] = create_list(&taskType, LIST_NORMAL);
+}
+
+/* Destroy a scheduler object */
+static void
+scheduler_delete(Object *obj)
+{
+ enum Priority p;
+ Scheduler *s = (void *) obj;
+ for (p = 0; p < PRIORITY_COUNT; p++)
+ destroy_list(s->queue[p]);
+}
/* Switch to a task */
static void
@@ -47,12 +78,12 @@ end:
/* Find the next schedulable ready queue */
static ObjectList *
-highest_priority_queue(void)
+highest_priority_queue(Scheduler *s)
{
enum Priority p;
for (p = PRIORITY_COUNT - 1; p > 0; p--) {
- if (count(readyQueue[p]))
- return readyQueue[p];
+ if (count(s->queue[p]))
+ return s->queue[p];
}
return NULL;
}
@@ -61,42 +92,97 @@ highest_priority_queue(void)
void
schedule(void)
{
- if (current && current->inCriticalSection)
- return;
-
+ enter_critical_section();
Task *task = current;
- ObjectList *queue = highest_priority_queue();
- slice[cpu->id] = 0;
+ Scheduler *s = cpu->scheduler;
+ ObjectList *queue = highest_priority_queue(s);
+ s->timeslice = 0;
/* Idle if necessary */
if (!queue) {
- if (current && current->state == RUNNING)
+ if (current && current->state == RUNNING) {
+ exit_critical_section();
return;
+ }
current = NULL;
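+ /* Drop the departing task from this CPU's count while it idles, so an
+ * idle CPU looks lightly loaded to least_used_scheduler() */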
+ if (task)
+ s->tasks--;
asm volatile("sti");
- while (!(queue = highest_priority_queue()))
+ while (!(queue = highest_priority_queue(s)))
asm volatile("hlt");
asm volatile("cli");
+ if (task)
+ s->tasks++;
current = task;
}
/* Schedule next task */
task = pop_from_start(queue);
task->state = RUNNING;
- if (task == current)
+ if (task == current) {
+ exit_critical_section();
return;
+ }
if (current && current->state == RUNNING) {
current->state = READY;
- add(readyQueue[current->priority], current);
+ add(s->queue[current->priority], current);
+ } else if (current) {
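+ /* current has blocked or exited; it no longer counts against this CPU */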
+ tasks--;
+ s->tasks--;
}
+ exit_critical_section();
switch_to_task(task);
}
-/* Initialise the scheduler */
+/* Find the scheduler with the least tasks */
+Scheduler *
+least_used_scheduler(void)
+{
+ Processor *proc;
+ Scheduler *best = cpu->scheduler;
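+ /* Unlocked scan of every CPU's task count; the result is only a placement hint */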
+ for_each_cpu(proc) {
+ if (proc->scheduler->tasks < best->tasks)
+ best = proc->scheduler;
+ }
+ return best;
+}
+
+/* Add a task to a scheduler */
void
-init_scheduler(void)
+enqueue_task(Task *task)
{
- enum Priority p;
- for (p = 0; p < PRIORITY_COUNT; p++)
- readyQueue[p] = create_list(&taskType, LIST_NORMAL);
+ Scheduler *s = task->scheduler;
+ if (__builtin_expect(!s, 0))
+ s = task->scheduler = least_used_scheduler();
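+ /* A task bound to another CPU is handed over through its IPI queue
+ * rather than being added to that CPU's queues directly */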
+ if (s != cpu->scheduler) {
+ send_ipiq(s->cpu->id, (ipiq_func_t) enqueue_task,
+ task, IPIQ_SYNC);
+ } else {
+ enter_critical_section();
+ tasks++;
+ s->tasks++;
+ add(s->queue[task->priority], task);
+ exit_critical_section();
+ }
+}
+
+/* Balance the schedulers */
+void
+balance_scheduler(void)
+{
+ Task *t;
+ Scheduler *s = cpu->scheduler;
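+ /* Each CPU may keep its share of the global task count, plus one of slack */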
+ size_t max = (tasks / ncpus) + 1;
+ if (s->tasks <= max)
+ return;
+
+ /* Redistribute tasks on overloaded processors */
+ enter_critical_section();
+ while (s->tasks > max) {
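+ /* enqueue_task() on the target CPU re-adds the task to both counts */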
+ tasks--;
+ s->tasks--;
+ t = pop_from_start(highest_priority_queue(s));
+ t->scheduler = NULL;
+ send_ipiq(least_used_scheduler()->cpu->id,
+ (ipiq_func_t) enqueue_task, t, IPIQ_SYNC);
+ }
+ exit_critical_section();
}
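
The Scheduler and Processor definitions live in the headers (nucleus/task.h and the newly included nucleus/cpu.h) and are not part of this diff. As a reading aid only, here is a minimal sketch of the layout scheduler.c appears to assume; the field names are taken from their uses above, while the member types and the embedded Object header are guesses, not taken from the commit:

	/* Hypothetical layout, inferred from the uses in scheduler.c */
	typedef struct Scheduler {
		Object      obj;                    /* object header, assumed first given the (void *) casts */
		Processor  *cpu;                    /* owning processor, set from cpu->self */
		Task       *task;                   /* current task; only initialised in this file */
		size_t      tasks;                  /* number of tasks assigned to this CPU */
		uint8_t     timeslice;              /* replaces the old slice[cpu->id] counter */
		ObjectList *queue[PRIORITY_COUNT];  /* one ready queue per priority */
	} Scheduler;

Under the same assumption, each Processor carries a scheduler pointer (cpu->scheduler) that for_each_cpu() and least_used_scheduler() walk, and PRIORITY_COUNT, previously private to scheduler.c, now lives alongside this structure in the header.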