| From: Peter Zijlstra <peterz@infradead.org> |
| Date: Mon, 30 Sep 2019 18:15:44 +0200 |
| Subject: [PATCH] locking/rtmutex: Clean ->pi_blocked_on in the error case |
| |
| The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case |
| of failure (timeout, signal). The same cleanup is required in |
| __rt_mutex_start_proxy_lock(). |
| In both cases the task was interrupted by a signal or timeout while |
| acquiring the lock and after the interruption it no longer blocks on the |
| lock. |
| |
| Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly") |
| Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++++------------------ |
| 1 file changed, 25 insertions(+), 18 deletions(-) |
| |
| --- a/kernel/locking/rtmutex.c |
| +++ b/kernel/locking/rtmutex.c |
| @@ -2145,6 +2145,26 @@ void rt_mutex_proxy_unlock(struct rt_mut |
| rt_mutex_set_owner(lock, NULL); |
| } |
| |
| +static void fixup_rt_mutex_blocked(struct rt_mutex *lock) |
| +{ |
| + struct task_struct *tsk = current; |
| + /* |
| + * RT has a problem here when the wait got interrupted by a timeout |
| + * or a signal. task->pi_blocked_on is still set. The task must |
| + * acquire the hash bucket lock when returning from this function. |
| + * |
| + * If the hash bucket lock is contended then the |
| + * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in |
| + * task_blocks_on_rt_mutex() will trigger. This can be avoided by |
| + * clearing task->pi_blocked_on which removes the task from the |
| + * boosting chain of the rtmutex. That's correct because the task |
| + * is no longer blocked on it. |
| + */ |
| + raw_spin_lock(&tsk->pi_lock); |
| + tsk->pi_blocked_on = NULL; |
| + raw_spin_unlock(&tsk->pi_lock); |
| +} |
| + |
| /** |
| * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task |
| * @lock: the rt_mutex to take |
| @@ -2217,6 +2237,9 @@ int __rt_mutex_start_proxy_lock(struct r |
| ret = 0; |
| } |
| |
| + if (ret) |
| + fixup_rt_mutex_blocked(lock); |
| + |
| debug_rt_mutex_print_deadlock(waiter); |
| |
| return ret; |
| @@ -2297,7 +2320,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m |
| struct hrtimer_sleeper *to, |
| struct rt_mutex_waiter *waiter) |
| { |
| - struct task_struct *tsk = current; |
| int ret; |
| |
| raw_spin_lock_irq(&lock->wait_lock); |
| @@ -2309,23 +2331,8 @@ int rt_mutex_wait_proxy_lock(struct rt_m |
| * have to fix that up. |
| */ |
| fixup_rt_mutex_waiters(lock); |
| - /* |
| - * RT has a problem here when the wait got interrupted by a timeout |
| - * or a signal. task->pi_blocked_on is still set. The task must |
| - * acquire the hash bucket lock when returning from this function. |
| - * |
| - * If the hash bucket lock is contended then the |
| - * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in |
| - * task_blocks_on_rt_mutex() will trigger. This can be avoided by |
| - * clearing task->pi_blocked_on which removes the task from the |
| - * boosting chain of the rtmutex. That's correct because the task |
| - * is not longer blocked on it. |
| - */ |
| - if (ret) { |
| - raw_spin_lock(&tsk->pi_lock); |
| - tsk->pi_blocked_on = NULL; |
| - raw_spin_unlock(&tsk->pi_lock); |
| - } |
| + if (ret) |
| + fixup_rt_mutex_blocked(lock); |
| |
| raw_spin_unlock_irq(&lock->wait_lock); |
| |