From e967bb87944cf086e7957b6eb2ad58e5b66a5aaa Mon Sep 17 00:00:00 2001
From: Avi Kivity <avi@cloudius-systems.com>
Date: Mon, 11 Feb 2013 12:20:21 +0200
Subject: [PATCH] mutex: fairness

Without fairness, the concurrent alloc/free test fails quickly on smp,
as the allocator thread hogs the allocator mutex and quickly exhausts
all memory.  While we could fix up the test to avoid this, fairness is
a desirable quality, so implement it.

We do so by adding an owner field, and having the unlocker transfer
ownership of the locked mutex instead of freeing it and letting the
waiter race with a newcomer.

Also simplify the wait list to a singly linked list.  There was some
corruption with the original implementation, and rather than fix it,
replace it with a singly linked list, which is all that is needed.
---
 core/mutex.cc       | 43 +++++++++++++++++++++----------------------
 include/osv/mutex.h |  3 ++-
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/core/mutex.cc b/core/mutex.cc
index 1509f988b..2559d70d5 100644
--- a/core/mutex.cc
+++ b/core/mutex.cc
@@ -3,13 +3,15 @@
 #include <sched.hh>
 
 struct waiter {
-    struct waiter* prev;
     struct waiter* next;
     sched::thread* thread;
 };
 
-extern "C" void mutex_wait(mutex_t *mutex)
+extern "C" void mutex_lock(mutex_t *mutex)
 {
+    if (mutex_trylock(mutex)) {
+        return;
+    }
     struct waiter w;
 
     w.thread = sched::thread::current();
@@ -19,45 +21,42 @@ extern "C" void mutex_wait(mutex_t *mutex)
         mutex->_wait_list.first = &w;
     } else {
         mutex->_wait_list.last->next = &w;
-        w.prev = mutex->_wait_list.last;
     }
+    w.next = nullptr;
     mutex->_wait_list.last = &w;
     spin_unlock(&mutex->_wait_lock);
 
     sched::thread::wait_until([=] {
-        return !mutex->_locked && mutex->_wait_list.first == &w;
+        return mutex->_owner == w.thread;
     });
 
     spin_lock(&mutex->_wait_lock);
-    if (mutex->_wait_list.first == &w)
-        mutex->_wait_list.first = w.next;
-    else
-        w.prev->next = w.next;
-
-    if (mutex->_wait_list.last == &w)
-        mutex->_wait_list.last = w.prev;
+    mutex->_wait_list.first = w.next;
+    if (!w.next)
+        mutex->_wait_list.last = nullptr;
     spin_unlock(&mutex->_wait_lock);
 }
 
-extern "C" void mutex_lock(mutex_t *mutex)
-{
-    while (__sync_lock_test_and_set(&mutex->_locked, 1)) {
-        mutex_wait(mutex);
-    }
-}
-
 extern "C" bool mutex_trylock(mutex_t *mutex)
 {
-    return !__sync_lock_test_and_set(&mutex->_locked, 1);
+    if (!__sync_lock_test_and_set(&mutex->_locked, 1)) {
+        mutex->_owner = sched::thread::current();
+        return true;
+    } else {
+        return false;
+    }
 }
 
 extern "C" void mutex_unlock(mutex_t *mutex)
 {
-    __sync_lock_release(&mutex->_locked, 0);
-
     spin_lock(&mutex->_wait_lock);
-    if (mutex->_wait_list.first)
+    if (mutex->_wait_list.first) {
+        mutex->_owner = mutex->_wait_list.first->thread;
         mutex->_wait_list.first->thread->wake();
+    } else {
+        mutex->_owner = nullptr;
+        __sync_lock_release(&mutex->_locked, 0);
+    }
     spin_unlock(&mutex->_wait_lock);
 }
 
diff --git a/include/osv/mutex.h b/include/osv/mutex.h
index a60795c26..c5c021a95 100644
--- a/include/osv/mutex.h
+++ b/include/osv/mutex.h
@@ -21,6 +21,7 @@ static inline void spinlock_init(spinlock_t *sl)
 
 struct cmutex {
     bool _locked;
+    void *_owner;
    struct wait_list {
         struct waiter *first;
         struct waiter *last;
@@ -30,7 +31,7 @@ struct cmutex {
 
 typedef struct cmutex mutex_t;
 
-#define MUTEX_INITIALIZER { 0, { 0, 0 }, { 0 } }
+#define MUTEX_INITIALIZER {}
 
 void mutex_lock(mutex_t* m);
 bool mutex_trylock(mutex_t* m);
-- 
GitLab
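
For readers outside the OSv tree, the handoff idea in this patch can be sketched
with standard C++ threading primitives.  The class below is a minimal sketch only:
the name handoff_mutex is illustrative and not part of OSv, and std::mutex plus
std::condition_variable stand in for OSv's spin_lock and sched::thread::wait_until.

    // Minimal sketch of direct-handoff ("fair") locking: unlock() names the next
    // owner instead of releasing the lock and letting waiters race newcomers.
    #include <condition_variable>
    #include <deque>
    #include <iostream>
    #include <mutex>
    #include <thread>

    class handoff_mutex {                       // illustrative name, not OSv code
        std::mutex _wait_lock;                  // protects _locked, _owner, _waiters
        std::condition_variable _cv;            // wakes waiting threads
        bool _locked = false;
        std::thread::id _owner{};               // thread the lock was handed to
        std::deque<std::thread::id> _waiters;   // FIFO wait list
    public:
        void lock() {
            std::unique_lock<std::mutex> lk(_wait_lock);
            if (!_locked) {                     // fast path: uncontended
                _locked = true;
                _owner = std::this_thread::get_id();
                return;
            }
            _waiters.push_back(std::this_thread::get_id());
            // Wait until the unlocker hands ownership to us specifically, so a
            // newcomer cannot barge in ahead of the queue.
            _cv.wait(lk, [&] { return _owner == std::this_thread::get_id(); });
            _waiters.pop_front();               // we were the head of the queue
        }
        void unlock() {
            std::lock_guard<std::mutex> lk(_wait_lock);
            if (!_waiters.empty()) {
                _owner = _waiters.front();      // transfer ownership; _locked stays set
                _cv.notify_all();               // simple but O(waiters); OSv wakes one thread
            } else {
                _owner = std::thread::id{};
                _locked = false;                // no waiters: actually release
            }
        }
    };

    int main() {
        handoff_mutex m;
        int counter = 0;
        auto worker = [&] {
            for (int i = 0; i < 10000; ++i) {
                m.lock();
                ++counter;
                m.unlock();
            }
        };
        std::thread a(worker), b(worker);
        a.join();
        b.join();
        std::cout << counter << '\n';           // expect 20000
    }

The essential point mirrors the patch: while waiters exist, unlock() never clears
the locked flag; it records the next owner and wakes it, so a thread arriving
between the unlock and the waiter's wakeup cannot steal the mutex out of turn.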