diff --git a/kernel/src/sched/mutex.c b/kernel/src/sched/mutex.c
index d7473ad..e5f4701 100644
--- a/kernel/src/sched/mutex.c
+++ b/kernel/src/sched/mutex.c
@@ -7,16 +7,14 @@ bool mutex_try_lock(Mutex* self) {
 }
 
 void mutex_lock(Mutex* self) {
-    bool result = atomic_exchange_explicit(&self->inner, true, memory_order_acquire);
-    if (result) {
-        Task* task = arch_get_cur_task();
-        Ipl old = arch_ipl_set(IPL_CRITICAL);
+    Ipl old = arch_ipl_set(IPL_CRITICAL);
+    spinlock_lock(&self->task_protector);
+    if (self->inner) {
+        Task* task = arch_get_cur_task();
         task->status = TASK_STATUS_WAITING;
         task->next = NULL;
-        spinlock_lock(&self->task_protector);
-
         if (self->waiting_tasks) {
             self->waiting_tasks_end->next = task;
             self->waiting_tasks_end = task;
@@ -27,24 +25,28 @@ void mutex_lock(Mutex* self) {
         }
         spinlock_unlock(&self->task_protector);
-        arch_ipl_set(old);
         sched();
     }
+    else {
+        self->inner = true;
+        spinlock_unlock(&self->task_protector);
+        arch_ipl_set(old);
+    }
 }
 
 void mutex_unlock(Mutex* self) {
-    atomic_store_explicit(&self->inner, false, memory_order_release);
     Ipl old = arch_ipl_set(IPL_CRITICAL);
     spinlock_lock(&self->task_protector);
     if (self->waiting_tasks) {
         Task* task = self->waiting_tasks;
         self->waiting_tasks = task->next;
         spinlock_unlock(&self->task_protector);
-        arch_ipl_set(old);
         sched_unblock(task);
+        arch_ipl_set(old);
     }
     else {
+        self->inner = false;
         spinlock_unlock(&self->task_protector);
         arch_ipl_set(old);
     }
diff --git a/kernel/src/sched/mutex.h b/kernel/src/sched/mutex.h
index 43a0495..9701e49 100644
--- a/kernel/src/sched/mutex.h
+++ b/kernel/src/sched/mutex.h
@@ -7,8 +7,8 @@ typedef struct Task Task;
 typedef struct {
     Task* waiting_tasks;
     Task* waiting_tasks_end;
-    atomic_bool inner;
     Spinlock task_protector;
+    bool inner;
 } Mutex;
 
 bool mutex_try_lock(Mutex* self);
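Note on the semantics of this change: inner is now an ordinary bool, and every access to it happens under task_protector at IPL_CRITICAL, so the old acquire/release atomic pair is no longer needed. The unlock path also changes how ownership transfers: when a waiter is queued, mutex_unlock() leaves inner set and hands the mutex directly to the task it wakes via sched_unblock(), so the woken task returns from mutex_lock() already owning the lock and no third task can slip in between the wake-up and the acquire. inner is cleared only when the wait queue is empty.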
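One thing the patch does not show is the body of mutex_try_lock(), which sits just above the first hunk and is left untouched here. If it previously used atomic_exchange_explicit on inner, it will no longer compile once inner is a plain bool, so it presumably needs the same spinlock/IPL protocol as the rewritten paths. A minimal sketch of what that could look like, using the same helpers as above; this is an illustration, not code from the patch:

bool mutex_try_lock(Mutex* self) {
    // Same protocol as mutex_lock(): raise IPL, then take the
    // queue/state lock before touching the plain-bool inner field.
    Ipl old = arch_ipl_set(IPL_CRITICAL);
    spinlock_lock(&self->task_protector);

    bool acquired = !self->inner;
    if (acquired) {
        self->inner = true; // uncontended: take ownership immediately
    }

    spinlock_unlock(&self->task_protector);
    arch_ipl_set(old);
    return acquired; // false: the mutex was already held
}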