| From 78795f419d2e51a3a2d7e41fc8ef419cfe98a8d8 Mon Sep 17 00:00:00 2001 |
| From: Steven Rostedt <srostedt@redhat.com> |
| Date: Fri, 3 Jul 2009 08:44:23 -0500 |
| Subject: [PATCH] rtmutex: break out early on first run |
| |
| commit c3c6a61dbff1c6fbbe9a9134e86c2bf19f6671e1 in tip. |
| |
| Lock stealing and the non-cmpxchg case always go through the slow
| path. This patch detects that we acquired the lock without ever
| having blocked, and breaks out early, skipping the waiter clean-up
| work.
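| 
| For illustration, a minimal user-space sketch of the same early-exit
| pattern (not the kernel code itself; all names such as demo_lock,
| demo_try_acquire and demo_slowlock are hypothetical):
| 
| 	#include <stdatomic.h>
| 	#include <stdio.h>
| 	#include <sched.h>
| 
| 	struct demo_lock {
| 		atomic_int owner;		/* 0 == unlocked */
| 	};
| 
| 	static int demo_try_acquire(struct demo_lock *lock, int id)
| 	{
| 		int expected = 0;
| 
| 		return atomic_compare_exchange_strong(&lock->owner,
| 						      &expected, id);
| 	}
| 
| 	static void demo_slowlock(struct demo_lock *lock, int id)
| 	{
| 		int missed = 0;	/* did we ever fail to take the lock? */
| 
| 		for (;;) {
| 			/* Try to acquire the lock */
| 			if (demo_try_acquire(lock, id)) {
| 				/* If we never blocked, break out now */
| 				if (!missed)
| 					goto unlock;
| 				break;
| 			}
| 			missed = 1;
| 
| 			/* stand-in for enqueueing a waiter and sleeping */
| 			sched_yield();
| 		}
| 
| 		/* tear-down is only needed when we actually waited */
| 		printf("task %d: cleaning up waiter state\n", id);
| 
| 	unlock:
| 		printf("task %d: acquired (missed=%d)\n", id, missed);
| 	}
| 
| 	int main(void)
| 	{
| 		struct demo_lock lock = { ATOMIC_VAR_INIT(0) };
| 
| 		/* uncontended: takes the early exit */
| 		demo_slowlock(&lock, 1);
| 		return 0;
| 	}
| 
| In the uncontended case the first demo_try_acquire() succeeds with
| missed still 0, so the function jumps straight past the waiter
| clean-up, just as the hunk below does with its unlock label.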
| |
| Signed-off-by: Steven Rostedt <srostedt@redhat.com> |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c |
| index f4c5cde..4d53d03 100644 |
| --- a/kernel/rtmutex.c |
| +++ b/kernel/rtmutex.c |
| @@ -768,6 +768,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| struct rt_mutex_waiter waiter; |
| unsigned long saved_state, state, flags; |
| struct task_struct *orig_owner; |
| + int missed = 0; |
| |
| debug_rt_mutex_init_waiter(&waiter); |
| waiter.task = NULL; |
| @@ -792,8 +793,14 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| int saved_lock_depth = current->lock_depth; |
| |
| /* Try to acquire the lock */ |
| - if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) |
| + if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) { |
| + /* If we never blocked break out now */ |
| + if (!missed) |
| + goto unlock; |
| break; |
| + } |
| + missed = 1; |
| + |
| /* |
| * waiter.task is NULL the first time we come here and |
| * when we have been woken up by the previous owner |
| @@ -850,6 +857,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| */ |
| fixup_rt_mutex_waiters(lock); |
| |
| + unlock: |
| raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
| |
| debug_rt_mutex_free_waiter(&waiter); |
| -- |
| 1.7.1.1 |
| |