Nucleus
Barry: Preemptive and lockless scheduler (commit a1eaf26, 3 years, 1 month ago)
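For context, a minimal sketch of the Spinlock structure the diff below assumes. The field names are taken from the code itself, but the actual definition lives elsewhere in the tree, the types are guesses, and Task is assumed to be the type behind current:

typedef struct Spinlock {
	char locked;	/* test-and-set flag, driven by the __atomic builtins */
	Task *owner;	/* task holding the lock, NULL when free */
	int usage;	/* recursion count for re-entrant acquires */
	int cpu;	/* CPU id + 1 fallback owner, made redundant here */
} Spinlock;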
diff --git a/lib/object/lock.c b/lib/object/lock.c
index d7a7d11..58f742f 100644
--- a/lib/object/lock.c
+++ b/lib/object/lock.c
@@ -10,12 +10,10 @@
#include <nucleus/task.h>
/* Check if already holding */
-static int
+static inline int
holding(Spinlock *lock)
{
-	if (current)
-		return (lock->locked && lock->owner == current);
-	return (lock->locked && lock->cpu == (cpu->id + 1));
+	return (lock->locked && lock->owner == current);
}
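The branch on current existed for code that ran before the scheduler had anything to schedule, when ownership fell back to a raw CPU id. A reconstructed note on that retired encoding, inferred from the removed lines:

/*
 * Pre-scheduler fallback, used only while current was NULL:
 *   lock->cpu == 0             not held by a bare CPU
 *   lock->cpu == cpu->id + 1   held by that CPU
 * The +1 bias keeps CPU 0 distinguishable from the unowned zero.
 * With the preemptive scheduler presumably guaranteeing a valid
 * current everywhere, the task pointer alone identifies the holder.
 */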
/* Initialise a lock */
@@ -30,21 +28,12 @@ init_lock(Spinlock *lock)
void
acquire(Spinlock *lock)
{
-	/*
-	 * Reference count the lock so it can be safely acquired by the same
-	 * holder multiple times. This stops a lock from deadlocking itself.
-	 */
-	__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
-	if (holding(lock))
-		return;
-
-	while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
-		asm volatile("pause");
-
-	if (current)
+	if (!holding(lock)) {
+		while (__atomic_test_and_set(&lock->locked, __ATOMIC_ACQUIRE))
+			asm volatile("pause");
		lock->owner = current;
-	else
-		lock->cpu = cpu->id + 1;
+	}
+	__atomic_add_fetch(&lock->usage, 1, __ATOMIC_RELAXED);
}
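Moving the usage increment below the test-and-set is not just tidier, it fixes a counting race: under the old ordering a contender bumped usage before holding anything, so a concurrent release by the real owner could see the inflated count and skip the unlock, stranding the contender. A sketch of that interleaving, reconstructed from the removed code:

/*
 * Old ordering, two CPUs:
 *
 *   CPU0 (owner, releasing)          CPU1 (acquiring)
 *   -----------------------          ----------------
 *                                    usage: 1 -> 2 (bumped up front)
 *   sub_fetch(usage): 2 -> 1
 *   nonzero, so return; locked
 *   is never cleared
 *                                    holding()? no, so spin on a
 *                                    lock no one will ever release
 */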
/* Release a lock */
@@ -52,10 +41,8 @@ void
release(Spinlock *lock)
{
	ASSERT(holding(lock));
-	if (__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED))
-		return;
-	__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
-
-	lock->owner = NULL;
-	lock->cpu = 0;
+	if (!__atomic_sub_fetch(&lock->usage, 1, __ATOMIC_RELAXED)) {
+		lock->owner = NULL;
+		__atomic_clear(&lock->locked, __ATOMIC_RELEASE);
+	}
}
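Note that release clears owner before dropping locked; done the other way round, a new holder could take the lock between the two stores and then have its owner overwritten with NULL, breaking both holding() and the ASSERT for the new holder. Finally, a short usage sketch of the re-entrant behaviour the refactor preserves; the lock name and caller are hypothetical, purely for illustration:

static Spinlock runq_lock;

void
example(void)
{
	acquire(&runq_lock);	/* not holding: spin, take it, usage 0 -> 1 */
	acquire(&runq_lock);	/* same owner: skip the spin, usage 1 -> 2 */
	/* ... re-entrant critical section ... */
	release(&runq_lock);	/* usage 2 -> 1, lock stays held */
	release(&runq_lock);	/* usage 1 -> 0, owner cleared, locked dropped */
}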