Nucleus / commit bb0cb7718204df7c0cbaf87484b1def3c4b2880f / task

Barry Per-CPU Scheduler bb0cb77 (3 years, 2 months ago)
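
For reference, the per-CPU Scheduler object that this change builds on is not defined in this diff (its definition presumably lives in the kernel headers next to Task). A minimal sketch, inferred only from the fields the code below touches, might look like the following; every field type and the embedded object header are assumptions:

	/* Hypothetical layout, inferred from usage in this commit -- not the real header. */
	typedef struct Scheduler {
		Object object;                      /* assumed embedded header: Scheduler is
		                                       created through the ObjectType machinery */
		Processor *cpu;                     /* owning processor, set from cpu->self */
		Task *task;                         /* task currently running on this CPU */
		ObjectList *queue[PRIORITY_COUNT];  /* one ready queue per priority level */
		size_t tasks;                       /* number of tasks owned by this scheduler */
		uint8_t timeslice;                  /* ticks used by the current task */
	} Scheduler;

Each Processor is likewise assumed to expose a scheduler pointer (cpu->scheduler), which schedule(), enqueue_task(), balance_scheduler() and the timer handler below all dereference.
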
diff --git a/task/clone.c b/task/clone.c
index 7f23de3..3a9f579 100644
--- a/task/clone.c
+++ b/task/clone.c
@@ -9,8 +9,6 @@
 #include <nucleus/vfs.h>
 #include <io.h>
 
-extern ObjectList *readyQueue[];
-
 /* Clone a task */
 pid_t
 clone(int flags)
@@ -68,7 +66,7 @@ clone(int flags)
 	asm volatile("mov %%esp, %0" : "=r" (child->esp));
 	asm volatile("mov %%ebp, %0" : "=r" (child->ebp));
 	child->eip = (uintptr_t) &&end;
-	add(readyQueue[child->priority], child);
+	enqueue_task(child);
 	tid = child->tid;
 	put(child);
 	exit_critical_section();
diff --git a/task/exit.c b/task/exit.c
index 4069775..b352fd4 100644
--- a/task/exit.c
+++ b/task/exit.c
@@ -12,7 +12,6 @@ _Noreturn void
 terminate(void)
 {
 	current->state = TERMINATED;
-	current->inCriticalSection = 0;
 
 	/* Unblock waiting tasks */
 	Task *tmp;
diff --git a/task/scheduler.c b/task/scheduler.c
index 02413bd..d83fe0c 100644
--- a/task/scheduler.c
+++ b/task/scheduler.c
@@ -3,21 +3,52 @@
  * routine, as well as the schedule() function.  The scheduler can be called
  * from anywhere, and will switch to the next task decided by the scheduler
  * rules.  If it cannot find a task to schedule, it just idles until one becomes
- * available.  This avoids the need for an idle task.
+ * available.  This avoids the need for an idle task.  Each processor now has
+ * its own scheduler, so CPUs do not contend on global ready queues.  Tasks
+ * normally stay on one processor, but may migrate to keep the queues balanced.
  */
 
+#include <nucleus/cpu.h>
 #include <nucleus/kernel.h>
 #include <nucleus/task.h>
 
-#define PRIORITY_COUNT 6
-
+static void scheduler_new(Object *);
+static void scheduler_delete(Object *);
 void context_switch(uintptr_t eip, page_dir_t pagedir,
                     uintptr_t esi, uintptr_t edi, uintptr_t ebx,
                     uintptr_t ebp, uintptr_t esp);
 
-extern uint8_t slice[];
+static _Atomic size_t tasks = 0;
+
+/* Scheduler object type */
+ObjectType schedulerType = {
+	.name = "SCHEDULER",
+	.size = sizeof(Scheduler),
+	.new = scheduler_new,
+	.delete = scheduler_delete,
+};
 
-ObjectList *readyQueue[PRIORITY_COUNT];
+/* Create a new scheduler object */
+static void
+scheduler_new(Object *obj)
+{
+	enum Priority p;
+	Scheduler *s = (void *) obj;
+	s->cpu = cpu->self;
+	s->task = NULL;
+	for (p = 0; p < PRIORITY_COUNT; p++)
+		s->queue[p] = create_list(&taskType, LIST_NORMAL);
+}
+
+/* Destroy a scheduler object */
+static void
+scheduler_delete(Object *obj)
+{
+	enum Priority p;
+	Scheduler *s = (void *) obj;
+	for (p = 0; p < PRIORITY_COUNT; p++)
+		destroy_list(s->queue[p]);
+}
 
 /* Switch to a task */
 static void
@@ -47,12 +78,12 @@ end:
 
 /* Find the next schedulable ready queue */
 static ObjectList *
-highest_priority_queue(void)
+highest_priority_queue(Scheduler *s)
 {
 	enum Priority p;
 	for (p = PRIORITY_COUNT - 1; p > 0; p--) {
-		if (count(readyQueue[p]))
-			return readyQueue[p];
+		if (count(s->queue[p]))
+			return s->queue[p];
 	}
 	return NULL;
 }
@@ -61,42 +92,97 @@ highest_priority_queue(void)
 void
 schedule(void)
 {
-	if (current && current->inCriticalSection)
-		return;
-
+	enter_critical_section();
 	Task *task = current;
-	ObjectList *queue = highest_priority_queue();
-	slice[cpu->id] = 0;
+	Scheduler *s = cpu->scheduler;
+	ObjectList *queue = highest_priority_queue(s);
+	s->timeslice = 0;
 
 	/* Idle if necessary */
 	if (!queue) {
 		if (current && current->state == RUNNING)
 			return;
 		current = NULL;
+		if (task)
+			s->tasks--;
 		asm volatile("sti");
-		while (!(queue = highest_priority_queue()))
+		while (!(queue = highest_priority_queue(s)))
 			asm volatile("hlt");
 		asm volatile("cli");
+		if (task)
+			s->tasks++;
 		current = task;
 	}
 
 	/* Schedule next task */
 	task = pop_from_start(queue);
 	task->state = RUNNING;
-	if (task == current)
+	if (task == current) {
+		exit_critical_section();
 		return;
+	}
 	if (current && current->state == RUNNING) {
 		current->state = READY;
-		add(readyQueue[current->priority], current);
+		add(s->queue[current->priority], current);
+	} else if (current) {
+		tasks--;
+		s->tasks--;
 	}
+	exit_critical_section();
 	switch_to_task(task);
 }
 
-/* Initialise the scheduler */
+/* Find the scheduler with the least tasks */
+Scheduler *
+least_used_scheduler(void)
+{
+	Processor *proc;
+	Scheduler *best = cpu->scheduler;
+	for_each_cpu(proc) {
+		if (proc->scheduler->tasks < best->tasks)
+			best = proc->scheduler;
+	}
+	return best;
+}
+
+/* Add a task to a scheduler */
 void
-init_scheduler(void)
+enqueue_task(Task *task)
 {
-	enum Priority p;
-	for (p = 0; p < PRIORITY_COUNT; p++)
-		readyQueue[p] = create_list(&taskType, LIST_NORMAL);
+	Processor *target;
+	Scheduler *s = task->scheduler;
+	if (__builtin_expect(!s, 0))
+		s = task->scheduler = least_used_scheduler();
+	if (s != cpu->scheduler && 0) {
+		send_ipiq(s->cpu->id, (ipiq_func_t) enqueue_task,
+		          task, IPIQ_SYNC);
+	} else {
+		enter_critical_section();
+		tasks++;
+		s->tasks++;
+		add(s->queue[task->priority], task);
+		exit_critical_section();
+	}
+}
+
+/* Balance the schedulers */
+void
+balance_scheduler(void)
+{
+	Task *t;
+	Scheduler *s = cpu->scheduler;
+	size_t max = (tasks / ncpus) + 1;
+	if (s->tasks <= max)
+		return;
+
+	/* Redistribute tasks on overloaded processors */
+	enter_critical_section();
+	while (s->tasks > max) {
+		s->tasks--;
+		t = pop_from_start(highest_priority_queue(s));
+		t->scheduler = NULL;
+		send_ipiq(least_used_scheduler()->cpu->id,
+		          (ipiq_func_t) enqueue_task, t, IPIQ_SYNC);
+	}
+	exit_critical_section();
 }
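
A note on the balancing threshold used above: max = (tasks / ncpus) + 1 is integer division, so with, say, 10 runnable tasks spread over 4 CPUs each scheduler may keep up to 10/4 + 1 = 3 tasks before balance_scheduler() starts handing work to the least-used scheduler. The + 1 leaves each run queue one task of slack, so an off-by-one imbalance does not cause tasks to migrate back and forth between processors.
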
diff --git a/task/switch.S b/task/switch.S
index 5ef5437..54769af 100644
--- a/task/switch.S
+++ b/task/switch.S
@@ -5,9 +5,12 @@
  * must be taken to read and set the registers in the correct order.
  */
 
-/* Perform a context switch */
-.globl context_switch
+.section .text
+.global context_switch
+.type context_switch, @function
+.align 4
 context_switch:
+.code32
 	cli
 	mov 4(%esp), %ecx
 	mov 8(%esp), %eax
diff --git a/task/syscall.c b/task/syscall.c
index effe6c2..ba7937e 100644
--- a/task/syscall.c
+++ b/task/syscall.c
@@ -93,7 +93,7 @@ syscall_handler(struct InterruptFrame *frame)
 	int num = frame->eax;
 	uintptr_t args = frame->esi;
 	int ret = -EINVAL;
-	enter_syscall_context(num);
+	enter_syscall_context();
 
 	/* Find syscall entry */
 	if (num >= sizeof(syscalls) / sizeof(syscalls[0]))
diff --git a/task/task.c b/task/task.c
index 69af014..b0268e2 100644
--- a/task/task.c
+++ b/task/task.c
@@ -12,7 +12,6 @@
 #include <nucleus/object.h>
 #include <nucleus/vfs.h>
 
-void init_scheduler(void);
 void timer_handler(struct InterruptFrame *frame);
 void syscall_handler(struct InterruptFrame *frame);
 
@@ -27,17 +26,15 @@ ObjectType taskType = {
 	.delete = task_delete,
 };
 
-Task *currentTask[MAX_CPUS];
-pid_t nextTid = 1;
 extern char stackTop[];
-extern ObjectList *readyQueue[];
 
 /* Create a new Task */
 static void
 task_new(Object *obj)
 {
+	static pid_t tid = 1;
 	Task *task = (void *) obj;
-	task->tid = nextTid++;
+	task->tid = tid++;
 	task->tgid = task->tid;
 	task->priority = NORMAL;
 	task->state = READY;
@@ -48,16 +45,22 @@ static void
 task_delete(Object *obj)
 {
 	Task *task = (void *) obj;
-	put(task->executable);
-	put(task->stack);
+	if (task->executable)
+		put(task->executable);
+	if (task->stack)
+		put(task->stack);
 	if (task->target)
 		put(task->target);
 	if (task->wait)
 		destroy_list(task->wait);
-	put(task->fs);
-	put(task->files);
-	put(task->vm);
-	put(task->signals);
+	if (task->fs)
+		put(task->fs);
+	if (task->files)
+		put(task->files);
+	if (task->vm)
+		put(task->vm);
+	if (task->signals)
+		put(task->signals);
 }
 
 /* Move the stack */
@@ -109,7 +112,7 @@ init_tasking(void)
 	/* Signals namespace */
 	current->signals = new(&signalsType);
 
-	init_scheduler();
+	cpu->scheduler->tasks = 1;
 	register_interrupt(0, timer_handler);
 	register_exception(128, syscall_handler);
 }
@@ -139,7 +142,7 @@ unblock_task(Task *task)
 {
 	lock(task);
 	task->state = READY;
-	add(readyQueue[task->priority], task);
+	enqueue_task(task);
 	unlock(task);
 }
 
diff --git a/task/time.c b/task/time.c
index d6a47d9..654795c 100644
--- a/task/time.c
+++ b/task/time.c
@@ -8,8 +8,9 @@
 #include <nucleus/cpu.h>
 #include <nucleus/task.h>
 
+void balance_scheduler(void);
+
 uint32_t monotonic = 0;
-uint8_t slice[MAX_CPUS] = {0};
 
 /* Timer interrupt */
 void
@@ -17,14 +18,17 @@ timer_handler(struct InterruptFrame *frame)
 {
 	if (cpu->id == 0)
 		monotonic++;
-
 	if (!current)
 		return;
-	slice[cpu->id]++;
+
+	/* Book-keep the scheduler */
+	Scheduler *s = cpu->scheduler;
+	if (monotonic % 300000 == 0)
+		balance_scheduler();
+	s->timeslice++;
 
 	/* Call scheduler */
-	if (slice[cpu->id] < (current->priority * 10))
+	if (s->timeslice < (current->priority * 10))
 		return;
-	slice[cpu->id] = 0;
 	schedule();
 }