Skip to content
Snippets Groups Projects
Commit e74f5a0d authored by Avi Kivity
Browse files

mutex: remove _locked member

It just adds complexity, and doesn't reduce locking overhead, as we must
take the spinlock anyway during unlock.

Remove it, since it makes conversion to a recursive mutex (needed
by fs/vfs/vfs_mount.c) more complicated.
parent e29da519
No related branches found
No related tags found
No related merge requests found
......@@ -9,16 +9,13 @@ struct waiter {
extern "C" void mutex_lock(mutex_t *mutex)
{
if (mutex_trylock(mutex)) {
return;
}
struct waiter w;
w.thread = sched::thread::current();
spin_lock(&mutex->_wait_lock);
if (mutex_trylock(mutex)) {
// mutex was unlocked just before we grabbed _wait_lock
if (!mutex->_owner) {
mutex->_owner = w.thread;
spin_unlock(&mutex->_wait_lock);
return;
}
......@@ -44,12 +41,14 @@ extern "C" void mutex_lock(mutex_t *mutex)
// Try to acquire the mutex without blocking.
//
// With the _locked flag removed, ownership is decided entirely under
// _wait_lock: the mutex is free iff _owner is null.  Returns true if
// this thread became the owner, false if the mutex is already held.
//
// NOTE(review): a waiter being handed ownership in mutex_unlock also
// happens under _wait_lock, so checking _owner here is race-free.
extern "C" bool mutex_trylock(mutex_t *mutex)
{
    bool ret = false;
    spin_lock(&mutex->_wait_lock);
    if (!mutex->_owner) {
        // Mutex is free: claim it for the current thread.
        mutex->_owner = sched::thread::current();
        ret = true;
    }
    spin_unlock(&mutex->_wait_lock);
    return ret;
}
extern "C" void mutex_unlock(mutex_t *mutex)
......@@ -60,7 +59,6 @@ extern "C" void mutex_unlock(mutex_t *mutex)
mutex->_wait_list.first->thread->wake();
} else {
mutex->_owner = nullptr;
__sync_lock_release(&mutex->_locked, 0);
}
spin_unlock(&mutex->_wait_lock);
}
......
......@@ -20,7 +20,6 @@ static inline void spinlock_init(spinlock_t *sl)
}
struct cmutex {
bool _locked;
spinlock_t _wait_lock;
void *_owner;
struct wait_list {
......@@ -39,7 +38,7 @@ void mutex_unlock(mutex_t* m);
// Initialize a mutex to the unlocked state: no owner, empty wait list,
// and a released internal spinlock.  Must be called before any other
// mutex_* operation on m.  (The former _locked member was removed; the
// mutex is considered free when _owner is null.)
static __always_inline void mutex_init(mutex_t* m)
{
    m->_owner = 0;
    m->_wait_list.first = 0;
    m->_wait_list.last = 0;
    spinlock_init(&m->_wait_lock);
}
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment