Commit f626ee59 authored by Kaspar Schleiser

Merge pull request #4557 from kaspar030/introduce_intrusive_singly_linked_list

core: mutex: several optimizations
parents b612eb9d 3d9020eb
/*
* Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @addtogroup core_util
* @{
*
* @file
* @brief Intrusive linked list
*
* A list is represented by a head node whose @c next pointer points to the
* first actual list element.
*
* @author Kaspar Schleiser <kaspar@schleiser.de>
*/
#ifndef LIST_H
#define LIST_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief List node structure
*
* Used as-is as the reference to a list, or as a member of any data
* structure that should be part of a list.
*
* Actual list objects should have a @c list_node_t as member and then use
* the container_of() macro in list operations.
* See @ref thread_add_to_list() for an example.
*/
typedef struct list_node {
    struct list_node *next;     /**< pointer to next list entry */
} list_node_t;
/**
* @brief Insert object into list
*
* If called with a list reference as node, the new node will become the new
* list head.
*
* @param[in] node list node before new entry
* @param[in] new_node list node to insert
*/
static inline void list_add(list_node_t *node, list_node_t *new_node) {
    new_node->next = node->next;
    node->next = new_node;
}
/**
* @brief Removes the head of the list and returns it
*
* @param[in] list Pointer to the list itself, where list->next points
* to the root node
*
* @return removed old list head, or NULL if empty
*/
static inline list_node_t* list_remove_head(list_node_t *list) {
    list_node_t* head = list->next;
    if (head) {
        list->next = head->next;
    }
    return head;
}
#ifdef __cplusplus
}
#endif
#endif /* LIST_H */
/** @} */
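A minimal standalone usage sketch of the new intrusive list (not part of the commit; item_t and the local container_of() fallback are illustrative): the payload struct embeds its own list_node_t, so no per-element allocation is needed, and container_of() maps a removed node back to its enclosing object.

```c
#include <stddef.h>
#include <stdio.h>

#include "list.h"   /* the intrusive list header introduced above */

/* Local fallback so the sketch builds standalone; RIOT ships its own
 * container_of() macro. */
#ifndef container_of
#define container_of(PTR, TYPE, MEMBER) \
    ((TYPE *)((char *)(PTR) - offsetof(TYPE, MEMBER)))
#endif

/* Hypothetical payload type that wants to live in an intrusive list. */
typedef struct {
    int value;
    list_node_t node;   /* embedded list hook */
} item_t;

int main(void)
{
    list_node_t head = { .next = NULL };    /* the list reference node */
    item_t a = { .value = 1 }, b = { .value = 2 };

    list_add(&head, &a.node);   /* head -> a */
    list_add(&head, &b.node);   /* head -> b -> a (LIFO at the head) */

    list_node_t *n;
    while ((n = list_remove_head(&head)) != NULL) {
        item_t *it = container_of(n, item_t, node);
        printf("%d\n", it->value);          /* prints 2, then 1 */
    }
    return 0;
}
```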
 /*
- * Copyright (C) 2013, 2014 Freie Universität Berlin
+ * Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
+ *               2013, 2014 Freie Universität Berlin
  *
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License v2.1. See the file LICENSE in the top level
@@ -21,7 +22,9 @@
 #ifndef MUTEX_H_
 #define MUTEX_H_
 
-#include "priority_queue.h"
+#include <stddef.h>
+
+#include "list.h"
 #include "atomic.h"
 
 #ifdef __cplusplus
@@ -31,27 +34,20 @@
 /**
  * @brief Mutex structure. Must never be modified by the user.
  */
-typedef struct mutex_t {
-    /* fields are managed by mutex functions, don't touch */
-    /**
-     * @brief The value of the mutex; 0 if unlocked, 1 if locked. **Must
-     *        never be changed by the user.**
-     * @internal
-     */
-    atomic_int_t val;
+typedef struct {
     /**
      * @brief The process waiting queue of the mutex. **Must never be changed
      *        by the user.**
      * @internal
      */
-    priority_queue_t queue;
+    list_node_t queue;
 } mutex_t;
 
 /**
  * @brief Static initializer for mutex_t.
  * @details This initializer is preferable to mutex_init().
  */
-#define MUTEX_INIT { ATOMIC_INIT(0), PRIORITY_QUEUE_INIT }
+#define MUTEX_INIT { { NULL } }
 
 /**
  * @brief Initializes a mutex object.
@@ -61,10 +57,24 @@ typedef struct mutex_t {
  */
 static inline void mutex_init(mutex_t *mutex)
 {
-    mutex_t empty_mutex = MUTEX_INIT;
-    *mutex = empty_mutex;
+    mutex->queue.next = NULL;
 }
 
+/**
+ * @brief Lock a mutex, blocking or non-blocking.
+ *
+ * @details For commit purposes you should probably use mutex_trylock() and
+ *          mutex_lock() instead.
+ *
+ * @param[in] mutex     Mutex object to lock. Has to be initialized first.
+ *                      Must not be NULL.
+ * @param[in] blocking  if true, block until mutex is available.
+ *
+ * @return 1 if mutex was unlocked, now it is locked.
+ * @return 0 if the mutex was locked.
+ */
+int _mutex_lock(mutex_t *mutex, int blocking);
+
 /**
  * @brief Tries to get a mutex, non-blocking.
  *
@@ -74,14 +84,20 @@ static inline void mutex_init(mutex_t *mutex)
  * @return 1 if mutex was unlocked, now it is locked.
  * @return 0 if the mutex was locked.
  */
-int mutex_trylock(mutex_t *mutex);
+static inline int mutex_trylock(mutex_t *mutex)
+{
+    return _mutex_lock(mutex, 0);
+}
 
 /**
  * @brief Locks a mutex, blocking.
  *
  * @param[in] mutex Mutex object to lock. Has to be initialized first. Must not be NULL.
  */
-void mutex_lock(mutex_t *mutex);
+static inline void mutex_lock(mutex_t *mutex)
+{
+    _mutex_lock(mutex, 1);
+}
 
 /**
  * @brief Unlocks the mutex.
...
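For orientation, this is how the public API reads after the change (the counter example is illustrative, not from the commit): mutex_lock() and mutex_trylock() are now inline wrappers around the single _mutex_lock() entry point, and a statically initialized mutex is nothing more than an empty wait-queue head.

```c
#include "mutex.h"

/* Hypothetical shared state guarded by the slimmed-down mutex. */
static mutex_t lock = MUTEX_INIT;   /* expands to { { NULL } }: empty queue */
static unsigned counter;

static void bump(void)
{
    mutex_lock(&lock);              /* blocking: _mutex_lock(&lock, 1) */
    counter++;
    mutex_unlock(&lock);
}

static int try_bump(void)
{
    if (mutex_trylock(&lock)) {     /* non-blocking: _mutex_lock(&lock, 0) */
        counter++;
        mutex_unlock(&lock);
        return 1;                   /* got the lock */
    }
    return 0;                       /* lock was already held */
}
```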
@@ -29,6 +29,7 @@
 #include "arch/thread_arch.h"
 #include "cpu_conf.h"
 #include "sched.h"
+#include "list.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -315,6 +316,21 @@ char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_sta
  */
 void thread_print_msg_queue(void);
 
+/**
+ * @brief Add thread to list, sorted by priority (internal)
+ *
+ * This will add @p thread to @p list sorted by the thread priority.
+ * It reuses the thread's rq_entry field.
+ * Used internally by msg and mutex implementations.
+ *
+ * @note Only use for threads *not on any runqueue* and with interrupts
+ *       disabled.
+ *
+ * @param[in] list      ptr to list root node
+ * @param[in] thread    thread to add
+ */
+void thread_add_to_list(list_node_t *list, thread_t *thread);
+
 #ifdef DEVELHELP
 /**
  * @brief Returns the name of a process
...
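The pattern this declaration enables is a priority-ordered wait queue built from nothing but the new list head. A sketch of that pattern (not part of the commit; the helper names are illustrative, container_of() is assumed to be in scope, and the caller must hold interrupts disabled as the @note requires) — it is exactly what the mutex implementation below does:

```c
#include "list.h"
#include "thread.h"

static list_node_t waiters;     /* list root; zero-initialized -> empty */

/* Queue a blocked thread, keeping the list sorted by priority. */
static void enqueue_waiter(thread_t *t)
{
    thread_add_to_list(&waiters, t);
}

/* Pop the highest-priority waiter, or NULL if nobody is waiting. */
static thread_t *dequeue_waiter(void)
{
    list_node_t *node = list_remove_head(&waiters);
    if (node == NULL) {
        return NULL;
    }
    /* rq_entry starts with its next pointer, so the cast mirrors the mutex
     * code below */
    return container_of((clist_node_t *)node, thread_t, rq_entry);
}
```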
 /*
- * Copyright (C) 2013 Freie Universität Berlin
+ * Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
+ *               2013 Freie Universität Berlin
  *
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License v2.1. See the file LICENSE in the top level
@@ -29,102 +30,101 @@
 #include "thread.h"
 #include "irq.h"
 #include "thread.h"
+#include "list.h"
 
 #define ENABLE_DEBUG    (0)
 #include "debug.h"
 
-static void mutex_wait(struct mutex_t *mutex);
-
-int mutex_trylock(struct mutex_t *mutex)
-{
-    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
-    return atomic_set_to_one(&mutex->val);
-}
-
-void mutex_lock(struct mutex_t *mutex)
-{
-    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
-
-    if (atomic_set_to_one(&mutex->val) == 0) {
-        /* mutex was locked. */
-        mutex_wait(mutex);
-    }
-}
-
-static void mutex_wait(struct mutex_t *mutex)
+#define MUTEX_LOCKED ((void*)-1)
+
+int _mutex_lock(mutex_t *mutex, int blocking)
 {
     unsigned irqstate = irq_disable();
-    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    DEBUG("%s: Mutex in use.\n", sched_active_thread->name);
 
-    if (atomic_set_to_one(&mutex->val)) {
-        /* somebody released the mutex. return. */
-        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    if (mutex->queue.next == NULL) {
+        /* mutex is unlocked. */
+        mutex->queue.next = MUTEX_LOCKED;
+        DEBUG("%s: mutex_wait early out.\n", sched_active_thread->name);
         irq_restore(irqstate);
-        return;
+        return 1;
     }
-
-    sched_set_status((thread_t*) sched_active_thread, STATUS_MUTEX_BLOCKED);
-
-    priority_queue_node_t n;
-    n.priority = (unsigned int) sched_active_thread->priority;
-    n.data = (unsigned int) sched_active_thread;
-    n.next = NULL;
-
-    DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", sched_active_thread->name, n.priority);
-
-    priority_queue_add(&(mutex->queue), &n);
-
-    irq_restore(irqstate);
-
-    thread_yield_higher();
-
-    /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
+    else if (blocking) {
+        thread_t *me = (thread_t*) sched_active_thread;
+        DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", me->name, (uint32_t)me->priority);
+        sched_set_status(me, STATUS_MUTEX_BLOCKED);
+        if (mutex->queue.next == MUTEX_LOCKED) {
+            mutex->queue.next = (list_node_t*)&me->rq_entry;
+            mutex->queue.next->next = NULL;
+        }
+        else {
+            thread_add_to_list(&mutex->queue, me);
+        }
+        irq_restore(irqstate);
+        thread_yield_higher();
+        /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */
+        return 1;
+    }
+    else {
+        irq_restore(irqstate);
+        return 0;
+    }
 }
 
-void mutex_unlock(struct mutex_t *mutex)
+void mutex_unlock(mutex_t *mutex)
 {
     unsigned irqstate = irq_disable();
-    DEBUG("mutex_unlock(): val: %u pid: %" PRIkernel_pid "\n", ATOMIC_VALUE(mutex->val), sched_active_pid);
+    DEBUG("mutex_unlock(): queue.next: 0x%08x pid: %" PRIkernel_pid "\n", (unsigned)mutex->queue.next, sched_active_pid);
 
-    if (ATOMIC_VALUE(mutex->val) == 0) {
+    if (mutex->queue.next == NULL) {
        /* the mutex was not locked */
        irq_restore(irqstate);
        return;
     }
 
-    priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
-    if (!next) {
+    if (mutex->queue.next == MUTEX_LOCKED) {
+        mutex->queue.next = NULL;
         /* the mutex was locked and no thread was waiting for it */
-        ATOMIC_VALUE(mutex->val) = 0;
         irq_restore(irqstate);
         return;
     }
 
-    thread_t *process = (thread_t *) next->data;
+    list_node_t *next = (list_node_t*) list_remove_head(&mutex->queue);
+    thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry);
     DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid);
     sched_set_status(process, STATUS_PENDING);
 
+    if (!mutex->queue.next) {
+        mutex->queue.next = MUTEX_LOCKED;
+    }
+
     uint16_t process_priority = process->priority;
     irq_restore(irqstate);
     sched_switch(process_priority);
 }
 
-void mutex_unlock_and_sleep(struct mutex_t *mutex)
+void mutex_unlock_and_sleep(mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
+    DEBUG("%s: unlocking mutex. queue.next: 0x%08x pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, (unsigned)mutex->queue.next, sched_active_pid);
     unsigned irqstate = irq_disable();
 
-    if (ATOMIC_VALUE(mutex->val) != 0) {
-        priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
-        if (next) {
-            thread_t *process = (thread_t *) next->data;
-            DEBUG("%s: waking up waiter.\n", process->name);
-            sched_set_status(process, STATUS_PENDING);
+    if (mutex->queue.next) {
+        if (mutex->queue.next == MUTEX_LOCKED) {
+            mutex->queue.next = NULL;
         }
         else {
-            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
+            list_node_t *next = list_remove_head(&mutex->queue);
+            thread_t *process = container_of((clist_node_t*)next, thread_t, rq_entry);
+            DEBUG("%s: waking up waiter.\n", process->name);
+            sched_set_status(process, STATUS_PENDING);
+            if (!mutex->queue.next) {
+                mutex->queue.next = MUTEX_LOCKED;
+            }
         }
     }
     DEBUG("%s: going to sleep.\n", sched_active_thread->name);
     sched_set_status((thread_t*) sched_active_thread, STATUS_SLEEPING);
     irq_restore(irqstate);
...
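The whole optimization rests on overloading queue.next as the lock state, replacing the old atomic val plus priority queue with a single pointer-sized word. A short annotated sketch of that encoding (the helper functions are illustrative, not part of the commit; MUTEX_LOCKED is private to the implementation file):

```c
#include <stdbool.h>

#include "mutex.h"

#define MUTEX_LOCKED ((void*)-1)    /* same sentinel the implementation uses */

/* queue.next encodes three states:
 *   NULL          -> mutex is unlocked
 *   MUTEX_LOCKED  -> mutex is locked, no thread is waiting
 *   anything else -> mutex is locked, queue.next points at the rq_entry
 *                    node of the highest-priority waiting thread
 */
static inline bool mutex_is_locked(const mutex_t *m)
{
    return m->queue.next != NULL;
}

static inline bool mutex_has_waiters(const mutex_t *m)
{
    return m->queue.next != NULL && m->queue.next != MUTEX_LOCKED;
}
```

Because each waiter's list node lives inside its thread_t, blocking no longer needs the stack-allocated priority_queue_node_t of the old code.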
@@ -21,6 +21,7 @@
 #include <errno.h>
 #include <stdio.h>
 
+#include "assert.h"
 #include "thread.h"
 #include "irq.h"
@@ -104,6 +105,25 @@ void thread_yield(void)
     thread_yield_higher();
 }
 
+void thread_add_to_list(list_node_t *list, thread_t *thread)
+{
+    assert (thread->status < STATUS_ON_RUNQUEUE);
+
+    uint16_t my_prio = thread->priority;
+    list_node_t *new_node = (list_node_t*)&thread->rq_entry;
+
+    while (list->next) {
+        thread_t *list_entry = container_of((clist_node_t*)list->next, thread_t, rq_entry);
+        if (list_entry->priority > my_prio) {
+            break;
+        }
+        list = list->next;
+    }
+
+    new_node->next = list->next;
+    list->next = new_node;
+}
+
 #ifdef DEVELHELP
 uintptr_t thread_measure_stack_free(char *stack)
 {
...
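The insertion loop above is a stable insert into a priority-ordered singly linked list: the new node goes in front of the first entry whose priority value is strictly greater, so equal-priority threads keep FIFO order and mutex_unlock() can wake the best waiter with a single list_remove_head(). The same logic on a plain struct, as a standalone sketch (names are illustrative, not from the commit):

```c
#include <stddef.h>
#include <stdint.h>

typedef struct node {
    struct node *next;
    uint16_t priority;          /* lower value = higher priority, as in RIOT */
} node_t;

/* Insert new_node behind every existing entry with priority <= its own,
 * mirroring thread_add_to_list(): stable (FIFO) for equal priorities. */
static void sorted_insert(node_t *list, node_t *new_node)
{
    while (list->next) {
        if (list->next->priority > new_node->priority) {
            break;
        }
        list = list->next;
    }
    new_node->next = list->next;
    list->next = new_node;
}
```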
@@ -44,7 +44,7 @@ class mutex {
 public:
     using native_handle_type = mutex_t*;
 
-    inline constexpr mutex() noexcept : m_mtx{0, PRIORITY_QUEUE_INIT} {}
+    inline constexpr mutex() noexcept : m_mtx{0} {}
     ~mutex();
 
     void lock();
...
@@ -19,6 +19,7 @@
 #include <time.h>
 
 #include "mutex.h"
+#include "priority_queue.h"
 
 #if defined(CPU_CC430) || defined(CPU_MSP430FXYZ)
 #   include "msp430_types.h"
@@ -120,7 +121,7 @@ int pthread_cond_destroy(struct pthread_cond_t *cond);
  * @param[in, out] mutex pre-allocated mutex variable structure.
  * @return returns 0 on success, an errorcode otherwise.
  */
-int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex);
+int pthread_cond_wait(struct pthread_cond_t *cond, mutex_t *mutex);
 
 /**
  * @brief blocks the calling thread until the specified condition cond is signalled
@@ -129,7 +130,7 @@ int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex);
  * @param[in] abstime pre-allocated timeout.
  * @return returns 0 on success, an errorcode otherwise.
  */
-int pthread_cond_timedwait(struct pthread_cond_t *cond, struct mutex_t *mutex, const struct timespec *abstime);
+int pthread_cond_timedwait(struct pthread_cond_t *cond, mutex_t *mutex, const struct timespec *abstime);
 
 /**
  * @brief unblock at least one of the threads that are blocked on the specified condition variable cond
...
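The pthread changes are purely mechanical: struct mutex_t becomes the typedef'd mutex_t, and priority_queue.h is now included directly since mutex.h no longer drags it in. Caller code keeps the usual condition-variable shape; a brief sketch (illustrative, not from the commit; it assumes the pthread header is in the include path and that cond has been set up with pthread_cond_init() beforehand):

```c
#include "mutex.h"
#include "pthread.h"    /* assumed to declare the pthread_cond_* functions */

static mutex_t lock = MUTEX_INIT;
static struct pthread_cond_t cond;      /* assumed initialized elsewhere */
static volatile int ready;

void consumer(void)
{
    mutex_lock(&lock);                  /* plain core mutex_t works here now */
    while (!ready) {
        /* releases lock while blocked, re-acquires it before returning */
        pthread_cond_wait(&cond, &lock);
    }
    mutex_unlock(&lock);
}

void producer(void)
{
    mutex_lock(&lock);
    ready = 1;
    pthread_cond_signal(&cond);
    mutex_unlock(&lock);
}
```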
@@ -71,7 +71,7 @@ typedef struct pthread_thread {
 } pthread_thread_t;
 
 static pthread_thread_t *volatile pthread_sched_threads[MAXTHREADS];
-static struct mutex_t pthread_mutex;
+static mutex_t pthread_mutex;
 
 static volatile kernel_pid_t pthread_reaper_pid = KERNEL_PID_UNDEF;
...
@@ -92,7 +92,7 @@ int pthread_cond_destroy(struct pthread_cond_t *cond)
     return 0;
 }
 
-int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex)
+int pthread_cond_wait(struct pthread_cond_t *cond, mutex_t *mutex)
 {
     priority_queue_node_t n;
     n.priority = sched_active_thread->priority;
@@ -118,7 +118,7 @@ int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex)
     return 0;
 }
 
-int pthread_cond_timedwait(struct pthread_cond_t *cond, struct mutex_t *mutex, const struct timespec *abstime)
+int pthread_cond_timedwait(struct pthread_cond_t *cond, mutex_t *mutex, const struct timespec *abstime)
 {
     timex_t now, then, reltime;
...
@@ -36,7 +36,7 @@ struct __pthread_tls_key {
 /**
  * @brief Used while manipulating the TLS of a pthread.
  */
-static struct mutex_t tls_mutex;
+static mutex_t tls_mutex;
 
 /**
  * @brief Find a thread-specific datum.
...