Subject: locking/rtmutex: Make inner working of rt_spin_lock_slowlock() accessible
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 12 Jul 2017 10:24:49 +0200

The inner workings of rt_spin_lock_slowlock() are required elsewhere in
the locking core. Split them out into rt_spin_lock_slowlock_locked(),
which is called with lock->wait_lock already held and with a caller
supplied rt_mutex_waiter, and expose it together with
rt_spin_lock_slowunlock() via rtmutex_common.h. No functional change
intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/locking/rtmutex.c | 40 ++++++++++++++++++++++------------------
kernel/locking/rtmutex_common.h | 4 ++++
2 files changed, 26 insertions(+), 18 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1045,21 +1045,16 @@ static int task_blocks_on_rt_mutex(struc
* We store the current state under p->pi_lock in p->saved_state and
* the try_to_wake_up() code handles this accordingly.
*/
-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ unsigned long flags)
{
struct task_struct *lock_owner, *self = current;
- struct rt_mutex_waiter waiter, *top_waiter;
- unsigned long flags;
+ struct rt_mutex_waiter *top_waiter;
int ret;
- rt_mutex_init_waiter(&waiter, true);
-
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
return;
- }
BUG_ON(rt_mutex_owner(lock) == self);
@@ -1074,12 +1069,12 @@ static void noinline __sched rt_spin_lo
__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
raw_spin_unlock(&self->pi_lock);
- ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
+ ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
BUG_ON(ret);
for (;;) {
/* Try to acquire the lock again. */
- if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+ if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
break;
top_waiter = rt_mutex_top_waiter(lock);
@@ -1087,9 +1082,9 @@ static void noinline __sched rt_spin_lo
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- debug_rt_mutex_print_deadlock(&waiter);
+ debug_rt_mutex_print_deadlock(waiter);
- if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
schedule();
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -1117,11 +1112,20 @@ static void noinline __sched rt_spin_lo
*/
fixup_rt_mutex_waiters(lock);
- BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
- BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
+ BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
+}
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter waiter;
+ unsigned long flags;
+ rt_mutex_init_waiter(&waiter, true);
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ rt_spin_lock_slowlock_locked(lock, &waiter, flags);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
debug_rt_mutex_free_waiter(&waiter);
}
@@ -1131,7 +1135,7 @@ static bool __sched __rt_mutex_unlock_co
/*
* Slow path to release a rt_mutex spin_lock style
*/
-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
{
unsigned long flags;
DEFINE_WAKE_Q(wake_q);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -139,6 +139,10 @@ int __sched rt_mutex_slowlock_locked(str
enum rtmutex_chainwalk chwalk,
struct ww_acquire_ctx *ww_ctx,
struct rt_mutex_waiter *waiter);
+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ unsigned long flags);
+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock);
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"