| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Thu, 12 Oct 2017 16:36:39 +0200 |
| Subject: rtmutex: export lockdep-less version of rt_mutex's lock, |
| trylock and unlock |
| |
| Required for lock implementation on top of rtmutex. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| kernel/locking/rtmutex.c | 59 ++++++++++++++++++++++++++-------------- |
| kernel/locking/rtmutex_common.h | 3 ++ |
| 2 files changed, 42 insertions(+), 20 deletions(-) |
| |
| --- a/kernel/locking/rtmutex.c |
| +++ b/kernel/locking/rtmutex.c |
| @@ -1493,12 +1493,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc |
| rt_mutex_postunlock(&wake_q); |
| } |
| |
| -static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) |
| +int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) |
| { |
| might_sleep(); |
| + return rt_mutex_fastlock(lock, state, rt_mutex_slowlock); |
| +} |
| + |
| +/** |
| + * rt_mutex_lock_state - lock a rt_mutex with a given state |
| + * |
| + * @lock: The rt_mutex to be locked |
| + * @state: The state to set when blocking on the rt_mutex |
| + */ |
| +static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock, |
| + unsigned int subclass, int state) |
| +{ |
| + int ret; |
| |
| mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
| - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); |
| + ret = __rt_mutex_lock_state(lock, state); |
| + if (ret) |
| + mutex_release(&lock->dep_map, _RET_IP_); |
| + return ret; |
| +} |
| + |
| +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) |
| +{ |
| + rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE); |
| } |
| |
| #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| @@ -1539,16 +1560,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); |
| */ |
| int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) |
| { |
| - int ret; |
| - |
| - might_sleep(); |
| - |
| - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| - ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); |
| - if (ret) |
| - mutex_release(&lock->dep_map, _RET_IP_); |
| - |
| - return ret; |
| + return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE); |
| } |
| EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); |
| |
| @@ -1574,13 +1586,10 @@ int __sched __rt_mutex_futex_trylock(str |
| * Returns: |
| * 0 on success |
| * -EINTR when interrupted by a signal |
| - * -EDEADLK when the lock would deadlock (when deadlock detection is on) |
| */ |
| int __sched rt_mutex_lock_killable(struct rt_mutex *lock) |
| { |
| - might_sleep(); |
| - |
| - return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock); |
| + return rt_mutex_lock_state(lock, 0, TASK_KILLABLE); |
| } |
| EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); |
| |
| @@ -1615,6 +1624,14 @@ rt_mutex_timed_lock(struct rt_mutex *loc |
| } |
| EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); |
| |
| +int __sched __rt_mutex_trylock(struct rt_mutex *lock) |
| +{ |
| + if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) |
| + return 0; |
| + |
| + return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); |
| +} |
| + |
| /** |
| * rt_mutex_trylock - try to lock a rt_mutex |
| * |
| @@ -1630,10 +1647,7 @@ int __sched rt_mutex_trylock(struct rt_m |
| { |
| int ret; |
| |
| - if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) |
| - return 0; |
| - |
| - ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); |
| + ret = __rt_mutex_trylock(lock); |
| if (ret) |
| mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
| |
| @@ -1641,6 +1655,11 @@ int __sched rt_mutex_trylock(struct rt_m |
| } |
| EXPORT_SYMBOL_GPL(rt_mutex_trylock); |
| |
| +void __sched __rt_mutex_unlock(struct rt_mutex *lock) |
| +{ |
| + rt_mutex_fastunlock(lock, rt_mutex_slowunlock); |
| +} |
| + |
| /** |
| * rt_mutex_unlock - unlock a rt_mutex |
| * |
| --- a/kernel/locking/rtmutex_common.h |
| +++ b/kernel/locking/rtmutex_common.h |
| @@ -162,6 +162,9 @@ extern bool __rt_mutex_futex_unlock(stru |
| extern void rt_mutex_postunlock(struct wake_q_head *wake_q); |
| /* RW semaphore special interface */ |
| |
| +extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state); |
| +extern int __rt_mutex_trylock(struct rt_mutex *lock); |
| +extern void __rt_mutex_unlock(struct rt_mutex *lock); |
| int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, |
| struct hrtimer_sleeper *timeout, |
| enum rtmutex_chainwalk chwalk, |