Nucleus
Barry Object manager and heap in kernel library 08afe80 (3 years, 2 months ago)
diff --git a/lib/object/lock.c b/lib/object/lock.c
new file mode 100644
index 0000000..d7a7d11
--- /dev/null
+++ b/lib/object/lock.c
@@ -0,0 +1,61 @@
+/*
+ * This file implements spinlocks. It makes heavy use of GCC's atomic built-ins
+ * for synchronisation. The spinlocks have some simple mechanisms for preventing
+ * deadlocks. Each spinlock knows which CPU/task is holding it, and can allow
+ * that CPU/task to acquire it multiple times and safely release it.
+ */
+
+#include <nucleus/kernel.h>
+#include <nucleus/object.h>
+#include <nucleus/task.h>
+
+/* Check if already holding */
+static int
+holding(Spinlock *lock)
+{
+ if (current)
+ return (lock->locked && lock->owner == current);
+ return (lock->locked && lock->cpu == (cpu->id + 1));
+}
+
+/* Initialise a lock */
+void
+init_lock(Spinlock *lock)
+{
+ lock->locked = 0;
+ lock->usage = 0;
+}
+
+/* Acquire a lock */
+void
+acquire(Spinlock *lock)
+{
+ /*
+ * Reference count the lock so it can be safely acquired by the same
+ * holder multiple times. This stops a lock from deadlocking itself.
+ */
+ __atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
+ if (holding(lock))
+ return;
+
+ while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
+ asm volatile("pause");
+
+ if (current)
+ lock->owner = current;
+ else
+ lock->cpu = cpu->id + 1;
+}
+
+/* Release a lock */
+void
+release(Spinlock *lock)
+{
+ ASSERT(holding(lock));
+ if (__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED))
+ return;
+ __atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+
+ lock->owner = NULL;
+ lock->cpu = 0;
+}