| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 12 Oct 2017 17:17:03 +0200 |
| Subject: rtmutex: add mutex implementation based on rtmutex |
| |
| On PREEMPT_RT, mutexes are substituted by an rt_mutex based implementation |
| so that blocking on a mutex is priority-inheritance aware. Add that |
| implementation together with the wrappers which preserve the regular mutex |
| API, including the lockdep annotations. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| include/linux/mutex_rt.h | 160 ++++++++++++++++++++++++++ |
| kernel/locking/mutex-rt.c | 238 ++++++++++++++++++++++++++++++++++++++++++++++ |
| 2 files changed, 398 insertions(+) |
| create mode 100644 include/linux/mutex_rt.h |
| create mode 100644 kernel/locking/mutex-rt.c |
| |
| --- /dev/null |
| +++ b/include/linux/mutex_rt.h |
| @@ -0,0 +1,160 @@ |
| +#ifndef __LINUX_MUTEX_RT_H |
| +#define __LINUX_MUTEX_RT_H |
| + |
| +#ifndef __LINUX_MUTEX_H |
| +#error "Please include mutex.h" |
| +#endif |
| + |
| +#include <linux/rtmutex.h> |
| + |
| +/* FIXME: Just for __lockfunc */ |
| +#include <linux/spinlock.h> |
| + |
| +struct mutex { |
| + struct rt_mutex lock; |
| +#ifdef CONFIG_DEBUG_LOCK_ALLOC |
| + struct lockdep_map dep_map; |
| +#endif |
| +}; |
| + |
| +#define __MUTEX_INITIALIZER(mutexname) \ |
| + { \ |
| + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ |
| + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ |
| + } |
| + |
| +#define DEFINE_MUTEX(mutexname) \ |
| + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) |
| + |
| +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); |
| +extern void __lockfunc _mutex_lock(struct mutex *lock); |
| +extern void __lockfunc _mutex_lock_io(struct mutex *lock); |
| +extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass); |
| +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); |
| +extern int __lockfunc _mutex_lock_killable(struct mutex *lock); |
| +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); |
| +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); |
| +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); |
| +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); |
| +extern int __lockfunc _mutex_trylock(struct mutex *lock); |
| +extern void __lockfunc _mutex_unlock(struct mutex *lock); |
| + |
| +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) |
| +#define mutex_lock(l) _mutex_lock(l) |
| +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) |
| +#define mutex_lock_killable(l) _mutex_lock_killable(l) |
| +#define mutex_trylock(l) _mutex_trylock(l) |
| +#define mutex_unlock(l) _mutex_unlock(l) |
| +#define mutex_lock_io(l) _mutex_lock_io(l) |
| + |
| +#define __mutex_owner(l) ((l)->lock.owner) |
| + |
| +#ifdef CONFIG_DEBUG_MUTEXES |
| +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) |
| +#else |
| +static inline void mutex_destroy(struct mutex *lock) {} |
| +#endif |
| + |
| +#ifdef CONFIG_DEBUG_LOCK_ALLOC |
| +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) |
| +# define mutex_lock_interruptible_nested(l, s) \ |
| + _mutex_lock_interruptible_nested(l, s) |
| +# define mutex_lock_killable_nested(l, s) \ |
| + _mutex_lock_killable_nested(l, s) |
| +# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s) |
| + |
| +# define mutex_lock_nest_lock(lock, nest_lock) \ |
| +do { \ |
| + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ |
| + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
| +} while (0) |
| + |
| +#else |
| +# define mutex_lock_nested(l, s) _mutex_lock(l) |
| +# define mutex_lock_interruptible_nested(l, s) \ |
| + _mutex_lock_interruptible(l) |
| +# define mutex_lock_killable_nested(l, s) \ |
| + _mutex_lock_killable(l) |
| +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) |
| +# define mutex_lock_io_nested(l, s) _mutex_lock_io(l) |
| +#endif |
| + |
| +# define mutex_init(mutex) \ |
| +do { \ |
| + static struct lock_class_key __key; \ |
| + \ |
| + rt_mutex_init(&(mutex)->lock); \ |
| + __mutex_do_init((mutex), #mutex, &__key); \ |
| +} while (0) |
| + |
| +# define __mutex_init(mutex, name, key) \ |
| +do { \ |
| + rt_mutex_init(&(mutex)->lock); \ |
| + __mutex_do_init((mutex), name, key); \ |
| +} while (0) |
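| + |
| +/* |
| + * Usage sketch (illustrative only; "my_driver_lock", "struct my_dev" and |
| + * "dev" are made-up names). Initialization is identical to the regular |
| + * mutex API, so callers do not change when PREEMPT_RT substitutes this |
| + * implementation: |
| + * |
| + *	static DEFINE_MUTEX(my_driver_lock);	// compile-time init |
| + * |
| + *	struct my_dev { |
| + *		struct mutex lock; |
| + *	}; |
| + * |
| + *	mutex_init(&dev->lock);		// run-time init, per-site lockdep key |
| + */ |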
| + |
| +/* |
| + * These values are chosen such that FAIL and SUCCESS match the |
| + * values of the regular mutex_trylock(). |
| + */ |
| +enum mutex_trylock_recursive_enum { |
| + MUTEX_TRYLOCK_FAILED = 0, |
| + MUTEX_TRYLOCK_SUCCESS = 1, |
| + MUTEX_TRYLOCK_RECURSIVE, |
| +}; |
| +int __rt_mutex_owner_current(struct rt_mutex *lock); |
| + |
| +/** |
| + * mutex_trylock_recursive - trylock variant that allows recursive locking |
| + * @lock: mutex to be locked |
| + * |
| + * This function should not be used, _ever_. It is purely for hysterical GEM |
| + * raisins, and once those are gone this will be removed. |
| + * |
| + * Returns: |
| + *  MUTEX_TRYLOCK_FAILED    - trylock failed, |
| + *  MUTEX_TRYLOCK_SUCCESS   - lock acquired, |
| + *  MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. |
| + */ |
| +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum |
| +mutex_trylock_recursive(struct mutex *lock) |
| +{ |
| + if (unlikely(__rt_mutex_owner_current(&lock->lock))) |
| + return MUTEX_TRYLOCK_RECURSIVE; |
| + |
| + return mutex_trylock(lock); |
| +} |
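| + |
| +/* |
| + * Caller sketch (illustrative; "obj" and the error codes are made up). |
| + * Unlike mutex_trylock(), the return value is three-valued, so the |
| + * recursive case has to be handled explicitly: |
| + * |
| + *	switch (mutex_trylock_recursive(&obj->lock)) { |
| + *	case MUTEX_TRYLOCK_RECURSIVE: |
| + *		return -EBUSY;		// we already hold it |
| + *	case MUTEX_TRYLOCK_FAILED: |
| + *		return -EAGAIN;		// owned by someone else |
| + *	case MUTEX_TRYLOCK_SUCCESS: |
| + *		break;			// acquired, unlock when done |
| + *	} |
| + */ |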
| + |
| +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); |
| + |
| +#endif /* __LINUX_MUTEX_RT_H */ |
| --- /dev/null |
| +++ b/kernel/locking/mutex-rt.c |
| @@ -0,0 +1,238 @@ |
| +/* |
| + * kernel/locking/mutex-rt.c |
| + * |
| + * Real-Time Preemption Support |
| + * |
| + * started by Ingo Molnar: |
| + * |
| + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> |
| + * |
| + * historic credit for proving that Linux spinlocks can be implemented via |
| + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow |
| + * and others) who prototyped it on 2.4 and did lots of comparative |
| + * research and analysis; TimeSys, for proving that you can implement a |
| + * fully preemptible kernel via the use of IRQ threading and mutexes; |
| + * Bill Huey for persuasively arguing on lkml that the mutex model is the |
| + * right one; and to MontaVista, who ported pmutexes to 2.6. |
| + * |
| + * This code is a from-scratch implementation and is not based on pmutexes, |
| + * but the idea of converting spinlocks to mutexes is used here too. |
| + * |
| + * lock debugging, locking tree, deadlock detection: |
| + * |
| + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey |
| + * Released under the General Public License (GPL). |
| + * |
| + * Includes portions of the generic R/W semaphore implementation from: |
| + * |
| + * Copyright (c) 2001 David Howells (dhowells@redhat.com). |
| + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> |
| + * - Derived also from comments by Linus |
| + * |
| + * Pending ownership of locks and ownership stealing: |
| + * |
| + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt |
| + * |
| + * (also by Steven Rostedt) |
| + * - Converted single pi_lock to individual task locks. |
| + * |
| + * By Esben Nielsen: |
| + * Doing priority inheritance with help of the scheduler. |
| + * |
| + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> |
| + * - major rework based on Esben Nielsen's initial patch |
| + * - replaced thread_info references by task_struct refs |
| + * - removed task->pending_owner dependency |
| + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks |
| + * in the scheduler return path as discussed with Steven Rostedt |
| + * |
| + * Copyright (C) 2006, Kihon Technologies Inc. |
| + * Steven Rostedt <rostedt@goodmis.org> |
| + * - debugged and patched Thomas Gleixner's rework. |
| + * - added back the cmpxchg to the rework. |
| + * - turned atomic require back on for SMP. |
| + */ |
| + |
| +#include <linux/spinlock.h> |
| +#include <linux/rtmutex.h> |
| +#include <linux/sched.h> |
| +#include <linux/delay.h> |
| +#include <linux/module.h> |
| +#include <linux/kallsyms.h> |
| +#include <linux/syscalls.h> |
| +#include <linux/interrupt.h> |
| +#include <linux/plist.h> |
| +#include <linux/fs.h> |
| +#include <linux/futex.h> |
| +#include <linux/hrtimer.h> |
| + |
| +#include "rtmutex_common.h" |
| + |
| +/* |
| + * struct mutex functions |
| + */ |
| +void __mutex_do_init(struct mutex *mutex, const char *name, |
| + struct lock_class_key *key) |
| +{ |
| +#ifdef CONFIG_DEBUG_LOCK_ALLOC |
| + /* |
| + * Make sure we are not reinitializing a held lock: |
| + */ |
| + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); |
| + lockdep_init_map(&mutex->dep_map, name, key, 0); |
| +#endif |
| + mutex->lock.save_state = 0; |
| +} |
| +EXPORT_SYMBOL(__mutex_do_init); |
| + |
| +void __lockfunc _mutex_lock(struct mutex *lock) |
| +{ |
| + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); |
| +} |
| +EXPORT_SYMBOL(_mutex_lock); |
| + |
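| +/* |
| + * Like _mutex_lock(), but the time spent blocked on the mutex is |
| + * accounted as iowait via io_schedule_prepare()/io_schedule_finish(). |
| + */ |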
| +void __lockfunc _mutex_lock_io(struct mutex *lock) |
| +{ |
| + int token; |
| + |
| + token = io_schedule_prepare(); |
| + _mutex_lock(lock); |
| + io_schedule_finish(token); |
| +} |
| +EXPORT_SYMBOL_GPL(_mutex_lock_io); |
| + |
| +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) |
| +{ |
| + int ret; |
| + |
| + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| + ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); |
| + if (ret) |
| + mutex_release(&lock->dep_map, 1, _RET_IP_); |
| + return ret; |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_interruptible); |
| + |
| +int __lockfunc _mutex_lock_killable(struct mutex *lock) |
| +{ |
| + int ret; |
| + |
| + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| + ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); |
| + if (ret) |
| + mutex_release(&lock->dep_map, 1, _RET_IP_); |
| + return ret; |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_killable); |
| + |
| +#ifdef CONFIG_DEBUG_LOCK_ALLOC |
| +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) |
| +{ |
| + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); |
| + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_nested); |
| + |
| +void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass) |
| +{ |
| + int token; |
| + |
| + token = io_schedule_prepare(); |
| + |
| + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); |
| + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); |
| + |
| + io_schedule_finish(token); |
| +} |
| +EXPORT_SYMBOL_GPL(_mutex_lock_io_nested); |
| + |
| +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) |
| +{ |
| + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); |
| + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_nest_lock); |
| + |
| +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) |
| +{ |
| + int ret; |
| + |
| + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); |
| + ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); |
| + if (ret) |
| + mutex_release(&lock->dep_map, 1, _RET_IP_); |
| + return ret; |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); |
| + |
| +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) |
| +{ |
| + int ret; |
| + |
| + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); |
| + ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); |
| + if (ret) |
| + mutex_release(&lock->dep_map, 1, _RET_IP_); |
| + return ret; |
| +} |
| +EXPORT_SYMBOL(_mutex_lock_killable_nested); |
| +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ |
| + |
| +int __lockfunc _mutex_trylock(struct mutex *lock) |
| +{ |
| + int ret = __rt_mutex_trylock(&lock->lock); |
| + |
| + if (ret) |
| + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
| + |
| + return ret; |
| +} |
| +EXPORT_SYMBOL(_mutex_trylock); |
| + |
| +void __lockfunc _mutex_unlock(struct mutex *lock) |
| +{ |
| + mutex_release(&lock->dep_map, 1, _RET_IP_); |
| + __rt_mutex_unlock(&lock->lock); |
| +} |
| +EXPORT_SYMBOL(_mutex_unlock); |
| + |
| +/** |
| + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 |
| + * @cnt: the atomic which we are to dec |
| + * @lock: the mutex to return holding if we dec to 0 |
| + * |
| + * Returns true, with the mutex held, if the decrement hit 0; false otherwise. |
| + */ |
| +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) |
| +{ |
| + /* dec if we can't possibly hit 0 */ |
| + if (atomic_add_unless(cnt, -1, 1)) |
| + return 0; |
| + /* we might hit 0, so take the lock */ |
| + mutex_lock(lock); |
| + if (!atomic_dec_and_test(cnt)) { |
| + /* when we actually did the dec, we didn't hit 0 */ |
| + mutex_unlock(lock); |
| + return 0; |
| + } |
| + /* we hit 0, and we hold the lock */ |
| + return 1; |
| +} |
| +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); |
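| + |
| +/* |
| + * Usage sketch (illustrative; "obj", "refcnt" and my_obj_destroy() are |
| + * made-up names): drop a reference and, only when it was the last one, |
| + * tear the object down under the mutex: |
| + * |
| + *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) { |
| + *		my_obj_destroy(obj);	// count hit 0, lock held |
| + *		mutex_unlock(&obj->lock); |
| + *	} |
| + */ |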