Nucleus / commit a1eaf264431107a223a69ca35d3b916243d7b90e

Barry: Preemptive and lockless scheduler (a1eaf26, 3 years, 1 month ago)
diff --git a/Makefile b/Makefile
index 5ec91e5..4521552 100644
--- a/Makefile
+++ b/Makefile
@@ -50,7 +50,7 @@ build/%.o: %.S
 	@$(AS) -c $< -o $@ $(AFLAGS)
 
 # Overrides
-build/object/%.o: object/%.c
+build/lib/object/%.o: lib/object/%.c
 	$(info CC $<)
 	@mkdir -p $(@D)
 	@$(CC) -c $< -o $@ $(CFLAGS) -O3
diff --git a/include/nucleus/object.h b/include/nucleus/object.h
index 558d00f..f22e62b 100644
--- a/include/nucleus/object.h
+++ b/include/nucleus/object.h
@@ -15,12 +15,9 @@ typedef int (*compare_callback_t)(void *, void *);
 
 /* Spinlock */
 struct Spinlock {
-	_Atomic char locked;
+	_Atomic int locked;
 	refcount_t usage;
-	union {
-		Task *owner;
-		cpu_t cpu;
-	};
+	Task *owner;
 };
 
 /* Object Type */
diff --git a/lib/object/lock.c b/lib/object/lock.c
index d7a7d11..58f742f 100644
--- a/lib/object/lock.c
+++ b/lib/object/lock.c
@@ -10,12 +10,10 @@
 #include <nucleus/task.h>
 
 /* Check if already holding */
-static int
+static inline int
 holding(Spinlock *lock)
 {
-	if (current)
-		return (lock->locked && lock->owner == current);
-	return (lock->locked && lock->cpu == (cpu->id + 1));
+	return (lock->locked && lock->owner == current);
 }
 
 /* Initialise a lock */
@@ -30,21 +28,12 @@ init_lock(Spinlock *lock)
 void
 acquire(Spinlock *lock)
 {
-	/*
-	 * Reference count the lock so it can be safely acquired by the same
-	 * holder multiple times.  This stops a lock from deadlocking itself.
-	 */
-	__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
-	if (holding(lock))
-		return;
-
-	while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
-		asm volatile("pause");
-
-	if (current)
+	if (!holding(lock)) {
+		while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
+			asm volatile("pause");
 		lock->owner = current;
-	else
-		lock->cpu = cpu->id + 1;
+	}
+	__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
 }
 
 /* Release a lock */
@@ -52,10 +41,8 @@ void
 release(Spinlock *lock)
 {
 	ASSERT(holding(lock));
-	if (__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED))
-		return;
-	__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
-
-	lock->owner = NULL;
-	lock->cpu = 0;
+	if (!__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED)) {
+		__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+		lock->owner = NULL;
+	}
 }
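
The new lib/object/lock.c turns the spinlock into a recursive (re-entrant)
lock: owner records the holding task, usage counts nested acquires, and only
the outermost release() actually clears the lock, which is why the old
owner/cpu union can go away. A minimal sketch of the same pattern follows;
Task, current and the field types are stand-ins for the Nucleus definitions,
and locked is kept a char here because __atomic_test_and_set is only
specified to operate on a single byte:

typedef struct Task Task;
extern Task *current;			/* task running on this CPU */

typedef struct Spinlock {
	_Atomic char locked;		/* byte flag; see note above */
	unsigned int usage;		/* nesting depth (refcount_t upstream) */
	Task *owner;			/* holder, valid while locked */
} Spinlock;

static inline int
holding(Spinlock *lock)
{
	return (lock->locked && lock->owner == current);
}

void
acquire(Spinlock *lock)
{
	/* The first acquire spins; nested acquires only bump the depth. */
	if (!holding(lock)) {
		while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
			asm volatile("pause");	/* x86 spin-wait hint */
		lock->owner = current;
	}
	__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
}

void
release(Spinlock *lock)
{
	/* Only the outermost release drops the lock; clearing owner before
	 * locked avoids racing the next acquirer (the diff clears them in
	 * the opposite order). */
	if (!__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED)) {
		lock->owner = NULL;
		__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
	}
}
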
diff --git a/memory/fault.c b/memory/fault.c
index d8accb6..7dfae10 100644
--- a/memory/fault.c
+++ b/memory/fault.c
@@ -184,8 +184,8 @@ page_fault_handler(struct InterruptFrame *frame)
 
 	/* Iterate VM Regions */
 	VMRegion *region = find_region(addr);
-	/* Not in a region */
 	if (__builtin_expect(!region, 0)) {
+		/* Not in a region */
 		page_t pg = get_page(addr);
 		panic("Page Fault [%d:%d] (%#.8x -> %#.8x [tbl:%d, pg:%d][%#.8x], %s, %s, %s)",
 		      current->tgid, current->tid, frame->eip,
diff --git a/task/scheduler.c b/task/scheduler.c
index d83fe0c..d0fb83f 100644
--- a/task/scheduler.c
+++ b/task/scheduler.c
@@ -37,7 +37,7 @@ scheduler_new(Object *obj)
 	s->cpu = cpu->self;
 	s->task = NULL;
 	for (p = 0; p < PRIORITY_COUNT; p++)
-		s->queue[p] = create_list(&taskType, LIST_NORMAL);
+		s->queue[p] = create_list(&taskType, LIST_LOCKLESS);
 }
 
 /* Destroy a scheduler object */
@@ -96,31 +96,34 @@ schedule(void)
 	Task *task = current;
 	Scheduler *s = cpu->scheduler;
 	ObjectList *queue = highest_priority_queue(s);
-	s->timeslice = 0;
 
-	/* Idle if necessary */
+	/* No schedulable tasks */
 	if (!queue) {
 		if (current && current->state == RUNNING)
-			return;
+			goto end;
+
+		/* Idle */
 		current = NULL;
-		if (task)
+		if (task) {
+			tasks--;
 			s->tasks--;
+		}
 		asm volatile("sti");
 		while (!(queue = highest_priority_queue(s)))
 			asm volatile("hlt");
 		asm volatile("cli");
-		if (task)
+		if (task) {
+			tasks++;
 			s->tasks++;
+		}
 		current = task;
 	}
 
 	/* Schedule next task */
 	task = pop_from_start(queue);
 	task->state = RUNNING;
-	if (task == current) {
-		exit_critical_section();
-		return;
-	}
+	if (task == current)
+		goto end;
 	if (current && current->state == RUNNING) {
 		current->state = READY;
 		add(s->queue[current->priority], current);
@@ -128,8 +131,11 @@ schedule(void)
 		tasks--;
 		s->tasks--;
 	}
+end:
+	s->timeslice = task->priority * 10;
 	exit_critical_section();
-	switch_to_task(task);
+	if (task != current)
+		switch_to_task(task);
 }
 
 /* Find the scheduler with the least tasks */
@@ -153,7 +159,7 @@ enqueue_task(Task *task)
 	Scheduler *s = task->scheduler;
 	if (__builtin_expect(!s, 0))
 		s = task->scheduler = least_used_scheduler();
-	if (s != cpu->scheduler && 0) {
+	if (s != cpu->scheduler) {
 		send_ipiq(s->cpu->id, (ipiq_func_t) enqueue_task,
 		          task, IPIQ_SYNC);
 	} else {
@@ -161,6 +167,8 @@ enqueue_task(Task *task)
 		tasks++;
 		s->tasks++;
 		add(s->queue[task->priority], task);
+		if (s->task && s->task->priority < task->priority)
+			s->timeslice = 0;
 		exit_critical_section();
 	}
 }
@@ -181,8 +189,7 @@ balance_scheduler(void)
 		s->tasks--;
 		t = pop_from_start(highest_priority_queue(s));
 		t->scheduler = NULL;
-		send_ipiq(least_used_scheduler()->cpu->id,
-		          (ipiq_func_t) enqueue_task, t, IPIQ_SYNC);
+		enqueue_task(t);
 	}
 	exit_critical_section();
 }
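
Two scheduler behaviours come out of this file's hunks: schedule() now
reloads a per-CPU budget at the end: label (priority * 10 ticks for the task
it picked), and enqueue_task() zeroes that budget when a task of higher
priority than the one running arrives, so the next timer tick preempts. The
cross-CPU path is also enabled for real: the `&& 0` that short-circuited
send_ipiq() is gone, and balance_scheduler() now funnels migrated tasks back
through enqueue_task(). A sketch of the two budget rules, with hypothetical
helper names and pared-down structures:

struct Task {
	int priority;
};

struct Scheduler {
	struct Task *task;	/* task currently running on this CPU */
	int timeslice;		/* ticks left before schedule() runs again */
};

/* Enqueue side: a higher-priority arrival cancels the running task's
 * remaining budget, so the next tick calls schedule() immediately. */
static void
maybe_preempt(struct Scheduler *s, struct Task *woken)
{
	if (s->task && s->task->priority < woken->priority)
		s->timeslice = 0;
}

/* schedule() side (the end: label): the chosen task's budget scales with
 * its priority, so higher-priority tasks run longer between preemptions. */
static void
reload_timeslice(struct Scheduler *s, struct Task *next)
{
	s->timeslice = next->priority * 10;
}
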
diff --git a/task/task.c b/task/task.c
index 3d84436..539026a 100644
--- a/task/task.c
+++ b/task/task.c
@@ -142,8 +142,8 @@ unblock_task(Task *task)
 {
 	lock(task);
 	task->state = READY;
-	enqueue_task(task);
 	unlock(task);
+	enqueue_task(task);
 }
 
 /* Find a task by ID */
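
Moving enqueue_task() after unlock() matters in light of the scheduler
changes above: enqueue_task() can now send a synchronous IPI (IPIQ_SYNC) to
another CPU, and holding the per-task lock across a call that waits on
another processor invites deadlock if that processor is itself spinning on
lock() for the same task. This rationale is inferred from the diff; the
commit does not state it.
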
diff --git a/task/time.c b/task/time.c
index 654795c..d4d01f0 100644
--- a/task/time.c
+++ b/task/time.c
@@ -25,10 +25,7 @@ timer_handler(struct InterruptFrame *frame)
 	Scheduler *s = cpu->scheduler;
 	if (monotonic % 300000 == 0)
 		balance_scheduler();
-	s->timeslice++;
-
-	/* Call scheduler */
-	if (s->timeslice < (current->priority * 10))
-		return;
-	schedule();
+	if (s->timeslice == 0)
+		schedule();
+	s->timeslice--;
 }
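
The timer now counts the budget down instead of up: schedule() preloads
s->timeslice when it picks a task, and the handler just decrements and
reschedules at zero. A side effect is that the handler no longer reads
current->priority on every tick, a dereference that looks unsafe while the
CPU idles with current == NULL (unless it was guarded earlier in
timer_handler, which this hunk does not show). A condensed sketch of the
tick path, assuming priorities start at 1 so the reload is never zero:

extern void schedule(void);		/* reloads timeslice = priority * 10 */

static struct {
	int timeslice;
} *s;					/* stand-in for cpu->scheduler */

void
timer_tick(void)
{
	/* A zero budget means the current slice is spent: pick the next
	 * task (which reloads the budget), then charge this tick. */
	if (s->timeslice == 0)
		schedule();
	s->timeslice--;
}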