| From 37787f09f7bb53627067f8e576ae6fec67440f05 Mon Sep 17 00:00:00 2001 |
| From: Gregory Haskins <ghaskins@novell.com> |
| Date: Fri, 3 Jul 2009 08:44:22 -0500 |
| Subject: [PATCH] rtmutex: Remove the extra call to try_to_take_lock |
| |
| commit 73d33cafab165763a64e0e6f1f842193fff4b18c in tip. |
| |
| [ The following text is in the "utf-8" character set. ] |
| [ Your display is set for the "iso-8859-1" character set. ] |
| [ Some characters may be displayed incorrectly. ] |
| |
| From: Peter W. Morreale <pmorreale@novell.com> |
| |
| Remove the redundant attempt to get the lock. While it is true that the |
| exit path with this patch adds an unnecessary xchg (in the event the |
| lock is granted without further traversal in the loop), experimentation |
| shows that we almost never encounter this situation. |
| |
| Signed-off-by: Peter W. Morreale <pmorreale@novell.com> |
| Signed-off-by: Gregory Haskins <ghaskins@novell.com> |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c |
| index 5017ecb..f4c5cde 100644 |
| --- a/kernel/rtmutex.c |
| +++ b/kernel/rtmutex.c |
| @@ -775,12 +775,6 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| init_lists(lock); |
| |
| - /* Try to acquire the lock again: */ |
| - if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) { |
| - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
| - return; |
| - } |
| - |
| BUG_ON(rt_mutex_owner(lock) == current); |
| |
| /* |
| -- |
| 1.7.1.1 |
| |