Skip to content
Snippets Groups Projects
Commit f977654a authored by Hauke Petersen's avatar Hauke Petersen
Browse files

Merge pull request #5270 from authmillenon/core/fix/mutex-debug

core: allow DEBUG in mutex.c to run without DEVELHELP
parents a42525a7 1750e1ba
No related branches found
No related tags found
No related merge requests found
...@@ -40,18 +40,21 @@ ...@@ -40,18 +40,21 @@
int _mutex_lock(mutex_t *mutex, int blocking) int _mutex_lock(mutex_t *mutex, int blocking)
{ {
unsigned irqstate = irq_disable(); unsigned irqstate = irq_disable();
DEBUG("%s: Mutex in use.\n", sched_active_thread->name);
DEBUG("PID[%" PRIkernel_pid "]: Mutex in use.\n", sched_active_pid);
if (mutex->queue.next == NULL) { if (mutex->queue.next == NULL) {
/* mutex is unlocked. */ /* mutex is unlocked. */
mutex->queue.next = MUTEX_LOCKED; mutex->queue.next = MUTEX_LOCKED;
DEBUG("%s: mutex_wait early out.\n", sched_active_thread->name); DEBUG("PID[%" PRIkernel_pid "]: mutex_wait early out.\n",
sched_active_pid);
irq_restore(irqstate); irq_restore(irqstate);
return 1; return 1;
} }
else if (blocking) { else if (blocking) {
thread_t *me = (thread_t*) sched_active_thread; thread_t *me = (thread_t*)sched_active_thread;
DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", me->name, (uint32_t)me->priority); DEBUG("PID[%" PRIkernel_pid "]: Adding node to mutex queue: prio: %"
PRIu32 "\n", sched_active_pid, (uint32_t)me->priority);
sched_set_status(me, STATUS_MUTEX_BLOCKED); sched_set_status(me, STATUS_MUTEX_BLOCKED);
if (mutex->queue.next == MUTEX_LOCKED) { if (mutex->queue.next == MUTEX_LOCKED) {
mutex->queue.next = (list_node_t*)&me->rq_entry; mutex->queue.next = (list_node_t*)&me->rq_entry;
...@@ -62,7 +65,8 @@ int _mutex_lock(mutex_t *mutex, int blocking) ...@@ -62,7 +65,8 @@ int _mutex_lock(mutex_t *mutex, int blocking)
} }
irq_restore(irqstate); irq_restore(irqstate);
thread_yield_higher(); thread_yield_higher();
/* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */ /* We were woken up by scheduler. Waker removed us from queue.
* We have the mutex now. */
return 1; return 1;
} }
else { else {
...@@ -74,7 +78,9 @@ int _mutex_lock(mutex_t *mutex, int blocking) ...@@ -74,7 +78,9 @@ int _mutex_lock(mutex_t *mutex, int blocking)
void mutex_unlock(mutex_t *mutex) void mutex_unlock(mutex_t *mutex)
{ {
unsigned irqstate = irq_disable(); unsigned irqstate = irq_disable();
DEBUG("mutex_unlock(): queue.next: 0x%08x pid: %" PRIkernel_pid "\n", (unsigned)mutex->queue.next, sched_active_pid);
DEBUG("mutex_unlock(): queue.next: 0x%08x pid: %" PRIkernel_pid "\n",
(unsigned)mutex->queue.next, sched_active_pid);
if (mutex->queue.next == NULL) { if (mutex->queue.next == NULL) {
/* the mutex was not locked */ /* the mutex was not locked */
...@@ -93,7 +99,8 @@ void mutex_unlock(mutex_t *mutex) ...@@ -93,7 +99,8 @@ void mutex_unlock(mutex_t *mutex)
thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry); thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry);
DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid); DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n",
process->pid);
sched_set_status(process, STATUS_PENDING); sched_set_status(process, STATUS_PENDING);
if (!mutex->queue.next) { if (!mutex->queue.next) {
...@@ -107,7 +114,8 @@ void mutex_unlock(mutex_t *mutex) ...@@ -107,7 +114,8 @@ void mutex_unlock(mutex_t *mutex)
void mutex_unlock_and_sleep(mutex_t *mutex) void mutex_unlock_and_sleep(mutex_t *mutex)
{ {
DEBUG("%s: unlocking mutex. queue.next: 0x%08x pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, (unsigned)mutex->queue.next, sched_active_pid); DEBUG("PID[%" PRIkernel_pid "]: unlocking mutex. queue.next: 0x%08x, and "
"taking a nap\n", sched_active_pid, (unsigned)mutex->queue.next);
unsigned irqstate = irq_disable(); unsigned irqstate = irq_disable();
if (mutex->queue.next) { if (mutex->queue.next) {
...@@ -116,8 +124,9 @@ void mutex_unlock_and_sleep(mutex_t *mutex) ...@@ -116,8 +124,9 @@ void mutex_unlock_and_sleep(mutex_t *mutex)
} }
else { else {
list_node_t *next = list_remove_head(&mutex->queue); list_node_t *next = list_remove_head(&mutex->queue);
thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry); thread_t *process = container_of((clist_node_t*)next, thread_t,
DEBUG("%s: waking up waiter.\n", process->name); rq_entry);
DEBUG("PID[%" PRIkernel_pid "]: waking up waiter.\n", process->pid);
sched_set_status(process, STATUS_PENDING); sched_set_status(process, STATUS_PENDING);
if (!mutex->queue.next) { if (!mutex->queue.next) {
mutex->queue.next = MUTEX_LOCKED; mutex->queue.next = MUTEX_LOCKED;
...@@ -125,8 +134,8 @@ void mutex_unlock_and_sleep(mutex_t *mutex) ...@@ -125,8 +134,8 @@ void mutex_unlock_and_sleep(mutex_t *mutex)
} }
} }
DEBUG("%s: going to sleep.\n", sched_active_thread->name); DEBUG("PID[%" PRIkernel_pid "]: going to sleep.\n", sched_active_pid);
sched_set_status((thread_t*) sched_active_thread, STATUS_SLEEPING); sched_set_status((thread_t*)sched_active_thread, STATUS_SLEEPING);
irq_restore(irqstate); irq_restore(irqstate);
thread_yield_higher(); thread_yield_higher();
} }
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment