// BarryServer : Git / Nucleus / commit / e8e484f3952a9a3b7df1c3f5763a794a51ea6966 / object / lock.c

Barry Object locking e8e484f (3 years, 3 months ago)
diff --git a/object/lock.c b/object/lock.c
new file mode 100644
index 0000000..e4c77c9
--- /dev/null
+++ b/object/lock.c
@@ -0,0 +1,66 @@
+/*
+ * This file implements spinlocks.  It makes heavy use of GCC's atomic built-ins
+ * for synchronisation.  The spinlocks have some simple mechanisms for preventing
+ * deadlocks.  Each spinlock knows which CPU/task is holding it, and can allow
+ * that CPU/task to acquire it multiple times and safely release it.
+ */
+
+#include <nucleus/object.h>
+#include <nucleus/task.h>
+#include <nucleus/panic.h>
+
+/* Check whether the current CPU/task already holds the lock */
+static int
+holding(Spinlock *lock)
+{
+	if (current)
+		return (lock->locked && lock->owner == current);
+	return (lock->locked && lock->cpu == (CPUID + 1));
+}
+
+/* Initialise a lock */
+void
+init_lock(Spinlock *lock)
+{
+	lock->locked = 0;
+	lock->usage = 0;
+}
+
+/* Acquire a lock */
+void
+acquire(Spinlock *lock)
+{
+	/*
+	 * Reference count the lock so it can be safely acquired by the same
+	 * holder multiple times.  This stops a lock from deadlocking itself.
+	 * Only the current holder may bump the count; if a waiter did too,
+	 * the holder's release would see a non-zero count and never unlock.
+	 */
+	if (holding(lock)) {
+		__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
+		return;
+	}
+
+	while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
+		asm volatile("pause");
+
+	if (current)
+		lock->owner = current;
+	else
+		lock->cpu = CPUID + 1;
+	lock->usage = 1;
+}
+
+/* Release a lock */
+void
+release(Spinlock *lock)
+{
+	ASSERT(holding(lock));
+	if (__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED))
+		return;
+
+	/* Drop ownership before the flag, or a racing acquirer's fields could be wiped. */
+	lock->owner = NULL;
+	lock->cpu = 0;
+	__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+}
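
For context, here is a minimal sketch of how a caller might use this API, and in particular the nested acquire that the usage counter exists for. Spinlock, init_lock, acquire and release are the names added by this patch (declared, going by the includes, in <nucleus/object.h>); the stats structure and its helper functions are hypothetical, made up purely for illustration, so this is a usage sketch rather than code from the repository.

#include <nucleus/object.h>

/* Hypothetical shared state guarded by one spinlock. */
static struct {
	Spinlock lock;
	unsigned long updates;
} stats;

/* Hypothetical helper that takes the lock on its own. */
static void
stats_bump(void)
{
	acquire(&stats.lock);
	stats.updates++;
	release(&stats.lock);
}

/* Hypothetical caller: stats_bump() re-acquires the lock we already hold,
 * which only bumps the usage count instead of deadlocking. */
void
stats_tick(void)
{
	acquire(&stats.lock);	/* take the lock, usage = 1 */
	stats_bump();		/* nested acquire, usage = 2, then back to 1 */
	release(&stats.lock);	/* usage reaches 0, lock actually released */
}

void
stats_init(void)
{
	init_lock(&stats.lock);
}

Because holding() matches on the owning task when current is set and on the CPU otherwise, the same pattern works whether or not the call happens in task context.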