Nucleus
Barry Object locking e8e484f (3 years, 3 months ago)
diff --git a/include/nucleus/cpu.h b/include/nucleus/cpu.h
index c379cb0..8ed1f43 100644
--- a/include/nucleus/cpu.h
+++ b/include/nucleus/cpu.h
@@ -3,6 +3,8 @@
#include <stdint.h>
+typedef unsigned int cpu_t;
+
/* Structure for an Interrupt Frame */
struct InterruptFrame {
uint32_t eip, cs, eflags;
@@ -15,8 +17,8 @@ extern uintptr_t lapicPtr, ioapicPtr;
#define LAPIC(off) (*((uint32_t *) ((uint32_t) lapicPtr + (off))))
#define IOAPIC(off) (*((uint32_t *) ((uint32_t) ioapicPtr + (off))))
-extern uint8_t lapicNums[];
-#define CPUID lapicNums[(uint8_t) (LAPIC(0x20) >> 24)]
+extern cpu_t lapicNums[];
+#define CPUID lapicNums[(cpu_t) (LAPIC(0x20) >> 24)]
#define MAX_CPUS 2
/* Push/pop interrupts */
diff --git a/include/nucleus/object.h b/include/nucleus/object.h
index bcd7c9a..3103736 100644
--- a/include/nucleus/object.h
+++ b/include/nucleus/object.h
@@ -1,13 +1,30 @@
#ifndef _NUCLEUS_OBJECT_H
#define _NUCLEUS_OBJECT_H
+#include <sys/types.h>
+#include <nucleus/cpu.h>
+
typedef struct ObjectType ObjectType;
+struct Object;
typedef struct Object Object;
+typedef struct Spinlock Spinlock;
+
+typedef struct Task Task; /* Just a pointer, no need for full definition */
+
+/* Spinlock */
+struct Spinlock {
+ char locked;
+ refcount_t usage;
+ union {
+ Task *owner;
+ cpu_t cpu;
+ };
+};
/* Object Type */
struct ObjectType {
unsigned int count;
- unsigned int usage;
+ refcount_t usage;
void *(*new)(void);
void (*delete)(Object *);
};
@@ -15,11 +32,14 @@ struct ObjectType {
/* Object */
struct Object {
ObjectType *type;
- unsigned int usage;
+ refcount_t usage;
+ Spinlock lock;
};
void *get(void *addr);
void put(void *addr);
void *new(ObjectType *type);
+void lock(void *addr);
+void unlock(void *addr);
#endif
diff --git a/kernel/acpi/apic.c b/kernel/acpi/apic.c
index 2e786c0..0fdb2b2 100644
--- a/kernel/acpi/apic.c
+++ b/kernel/acpi/apic.c
@@ -55,7 +55,7 @@ struct ISOEntry {
size_t numCores = 1;
uintptr_t lapicPtr, ioapicPtr;
-uint8_t lapicIds[MAX_CPUS], lapicNums[MAX_CPUS];
+cpu_t lapicIds[MAX_CPUS], lapicNums[MAX_CPUS];
/* Enable APIC */
static void
@@ -100,7 +100,7 @@ static void
apic_start_timer(void)
{
/* Wait for this processor's "turn" */
- static uint8_t c = 0;
+ static cpu_t c = 0;
while (c != CPUID);
/* Start LAPIC countdown from -1 */
diff --git a/object/lock.c b/object/lock.c
new file mode 100644
index 0000000..e4c77c9
--- /dev/null
+++ b/object/lock.c
@@ -0,0 +1,61 @@
+/*
+ * This file implements spinlocks. It makes heavy use of GCC's atomic built-ins
+ * for synchronisation. The spinlocks have some simple mechanisms for preventing
+ * deadlocks. Each spinlock knows which CPU/task is holding it, and can allow
+ * that CPU/task to acquire it multiple times and safely release it.
+ */
+
+#include <nucleus/object.h>
+#include <nucleus/task.h>
+#include <nucleus/panic.h>
+
+/* Check if already holding */
+static int
+holding(Spinlock *lock)
+{
+ if (current)
+ return (lock->locked && lock->owner == current);
+ return (lock->locked && lock->cpu == (CPUID + 1));
+}
+
+/* Initialise a lock */
+void
+init_lock(Spinlock *lock)
+{
+ lock->locked = 0;
+ lock->usage = 0;
+}
+
+/* Acquire a lock */
+void
+acquire(Spinlock *lock)
+{
+ /*
+ * Reference count the lock so it can be safely acquired by the same
+ * holder multiple times. This stops a lock from deadlocking itself.
+ */
+ __atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
+ if (holding(lock))
+ return;
+
+ while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
+ asm volatile("pause");
+
+ if (current)
+ lock->owner = current;
+ else
+ lock->cpu = CPUID + 1;
+}
+
+/* Release a lock */
+void
+release(Spinlock *lock)
+{
+ ASSERT(holding(lock));
+ if (__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED))
+ return;
+ __atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+
+ lock->owner = NULL;
+ lock->cpu = 0;
+}
diff --git a/object/manager.c b/object/manager.c
index 301b6ca..a330d15 100644
--- a/object/manager.c
+++ b/object/manager.c
@@ -8,13 +8,16 @@
#include <nucleus/object.h>
+void acquire(Spinlock *lock);
+void release(Spinlock *lock);
+
/* Obtain a reference to an object */
void *
get(void *addr)
{
Object *obj = addr;
- obj->type->usage++;
- obj->usage++;
+ __atomic_add_fetch(&obj->type->usage, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&obj->usage, 1, __ATOMIC_RELAXED);
return addr;
}
@@ -23,10 +26,10 @@ void
put(void *addr)
{
Object *obj = addr;
- obj->type->usage--;
- if (--obj->usage)
+ __atomic_sub_fetch(&obj->type->usage, 1, __ATOMIC_RELAXED);
+ if (__atomic_sub_fetch(&obj->usage, 1, __ATOMIC_RELAXED))
return;
- obj->type->count--;
+ __atomic_sub_fetch(&obj->type->count, 1, __ATOMIC_RELAXED);
obj->type->delete(obj);
}
@@ -36,6 +39,22 @@ new(ObjectType *type)
{
Object *obj = type->new();
obj->type = type;
- type->count++;
+ __atomic_add_fetch(&type->count, 1, __ATOMIC_RELAXED);
return get(obj);
}
+
+/* Lock an object */
+void
+lock(void *addr)
+{
+ Object *obj = addr;
+ acquire(&obj->lock);
+}
+
+/* Unlock an object */
+void
+unlock(void *addr)
+{
+ Object *obj = addr;
+ release(&obj->lock);
+}
diff --git a/task/queue.c b/task/queue.c
index 670c26c..071a1fe 100644
--- a/task/queue.c
+++ b/task/queue.c
@@ -38,18 +38,22 @@ task_queue_delete(Object *obj)
void
add_to_queue(TaskQueue *queue, Task *task)
{
+ lock(queue);
if (!queue->start)
queue->start = get(task);
else
queue->end->next = get(task);
queue->end = task;
task->next = NULL;
+ unlock(queue);
}
/* Remove a Task from a Task Queue */
void
remove_from_queue(TaskQueue *queue, Task *task)
{
+ lock(queue);
+
/* Start of queue */
if (queue->start == task) {
queue->start = task->next;
@@ -61,8 +65,10 @@ remove_from_queue(TaskQueue *queue, Task *task)
for (prev = queue->start; prev->next; prev = prev->next)
if (prev->next == task)
break;
- if (!prev->next)
+ if (!prev->next) {
+ unlock(queue);
return;
+ }
prev->next = task->next;
@@ -72,13 +78,17 @@ found:
task->next = NULL;
put(task);
+
+ unlock(queue);
}
/* Remove the first Task from a Task Queue */
Task *
pop_from_queue(TaskQueue *queue)
{
+ lock(queue);
Task *head = get(queue->start);
remove_from_queue(queue, queue->start);
+ unlock(queue);
return head;
}
diff --git a/task/scheduler.c b/task/scheduler.c
index 115722e..cad926d 100644
--- a/task/scheduler.c
+++ b/task/scheduler.c
@@ -17,9 +17,11 @@ TaskQueue *readyQueue[PRIORITY_COUNT];
static void
switch_to_task(Task *task)
{
+ lock(current);
asm volatile("mov %%esp, %0" : "=r" (current->esp));
asm volatile("mov %%ebp, %0" : "=r" (current->ebp));
current->eip = (uintptr_t) &&end;
+ unlock(current);
put(current);
current = task; /* Use the passed reference */