From 48edf2eb0a42fd23495f9ce1dd0df40175fce40a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:24 -0500
Subject: [PATCH] rtmutex: remove uber optimization
commit 640f0c0599f850474cd1b676dfb61c338a79431c in tip.
The adaptive spin patches introduced an overdesigned optimization for
the adaptive path. Avoiding those code paths is not worth the extra
conditionals, and the scheme is inconsistent in itself. Remove it and
use the same mechanism as the other lock paths. That way we have a
consistent state manipulation scheme and fewer special cases.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
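For illustration, here is a stand-alone, user-space sketch of the loop
shape this patch leaves behind: the blocked state is saved once before
the loop, refreshed at the bottom of every iteration, and restored
unconditionally afterwards, with no "missed" bookkeeping. All names in
the sketch (sketch_lock, try_acquire, set_blocked, wait_turn) are made
up for this example; they are not the kernel's rtmutex API.

#include <stdbool.h>
#include <stdio.h>

struct sketch_lock { int owner; };

static bool try_acquire(struct sketch_lock *l, int me)
{
	if (l->owner == 0) {
		l->owner = me;
		return true;
	}
	return false;
}

/* Models rt_set_current_blocked_state(): mark blocked, return old value. */
static int set_blocked(int *state, int prev)
{
	*state = 1;			/* 1 == "blocked" */
	return prev;
}

static void wait_turn(struct sketch_lock *l)
{
	l->owner = 0;			/* pretend the owner released the lock */
}

static void slowlock_sketch(struct sketch_lock *l, int me)
{
	int state = 0;			/* 0 == "running" */
	/* Save the caller's state exactly once, before the loop. */
	int saved_state = set_blocked(&state, state);

	for (;;) {
		/* One acquisition attempt per iteration, no "missed" flag. */
		if (try_acquire(l, me))
			break;

		wait_turn(l);		/* spin or sleep until the owner lets go */

		/*
		 * Re-mark the task as blocked at the bottom of every
		 * iteration, as the other lock paths do, instead of only
		 * on the adaptive-wait branch as before the patch.
		 */
		saved_state = set_blocked(&state, saved_state);
	}

	state = saved_state;		/* rt_restore_current_state() equivalent */
	printf("task %d got the lock, state restored to %d\n", me, state);
}

int main(void)
{
	struct sketch_lock l = { .owner = 2 };	/* initially held by "task 2" */

	slowlock_sketch(&l, 1);
	return 0;
}

The point of the change shows up in the loop body: the state
save/refresh follows one pattern everywhere, which is why the fast-exit
"goto unlock" and the "missed" flag can be dropped from the real code
below.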
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 0421098..a4c8b7e 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -770,7 +770,6 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
struct rt_mutex_waiter waiter;
unsigned long saved_state, flags;
struct task_struct *orig_owner;
- int missed = 0;
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
@@ -791,19 +790,14 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
* about original state TASK_INTERRUPTIBLE as well, as we
* could miss a wakeup_interruptible()
*/
- saved_state = current->state;
+ saved_state = rt_set_current_blocked_state(current->state);
for (;;) {
int saved_lock_depth = current->lock_depth;
/* Try to acquire the lock */
- if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) {
- /* If we never blocked break out now */
- if (!missed)
- goto unlock;
+ if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL))
break;
- }
- missed = 1;
/*
* waiter.task is NULL the first time we come here and
@@ -832,12 +826,6 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
if (adaptive_wait(&waiter, orig_owner)) {
put_task_struct(orig_owner);
- saved_state = rt_set_current_blocked_state(saved_state);
- /*
- * The xchg() in update_current() is an implicit
- * barrier which we rely upon to ensure current->state
- * is visible before we test waiter.task.
- */
if (waiter.task)
schedule_rt_mutex(lock);
} else
@@ -845,6 +833,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
raw_spin_lock_irqsave(&lock->wait_lock, flags);
current->lock_depth = saved_lock_depth;
+ saved_state = rt_set_current_blocked_state(saved_state);
}
rt_restore_current_state(saved_state);
@@ -863,7 +852,6 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
*/
fixup_rt_mutex_waiters(lock);
- unlock:
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
debug_rt_mutex_free_waiter(&waiter);
--
1.7.1.1