From 1c76a7be644b423797184556f99f617ca5ec2c3c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:23 -0500
Subject: [PATCH] rtmutex: unify state manipulation
commit d5ad85bc76390226b203af351bbae79ee901ca8a in tip.
The manipulation of the waiter task state is duplicated all over the
place, each copy with slightly different details. Use one set of helper
functions to reduce the duplicated code and make the handling consistent
for all instances.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index b649eb0..0421098 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -696,13 +696,6 @@ rt_spin_lock_fastunlock(struct rt_mutex *lock,
slowfn(lock);
}
-static inline void
-update_current(unsigned long new_state, unsigned long *saved_state)
-{
- unsigned long state = xchg(&current->state, new_state);
- if (unlikely(state == TASK_RUNNING))
- *saved_state = TASK_RUNNING;
-}
#ifdef CONFIG_SMP
static int adaptive_wait(struct rt_mutex_waiter *waiter,
@@ -734,6 +727,34 @@ static int adaptive_wait(struct rt_mutex_waiter *waiter,
#endif
/*
+ * The state setting needs to preserve the original state and to take
+ * care of non-rtmutex wakeups.
+ */
+static inline unsigned long
+rt_set_current_blocked_state(unsigned long saved_state)
+{
+ unsigned long state;
+
+ state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
+ /*
+	 * Take care of non-rtmutex wakeups. rtmutex wakeups
+ * set the state to TASK_RUNNING_MUTEX.
+ */
+ if (state == TASK_RUNNING)
+ saved_state = TASK_RUNNING;
+
+ return saved_state;
+}
+
+static inline void rt_restore_current_state(unsigned long saved_state)
+{
+ unsigned long state = xchg(&current->state, saved_state);
+
+ if (state == TASK_RUNNING)
+ current->state = TASK_RUNNING;
+}
+
+/*
* Slow path lock function spin_lock style: this variant is very
* careful not to miss any non-lock wakeups.
*
@@ -747,7 +768,7 @@ static void noinline __sched
rt_spin_lock_slowlock(struct rt_mutex *lock)
{
struct rt_mutex_waiter waiter;
- unsigned long saved_state, state, flags;
+ unsigned long saved_state, flags;
struct task_struct *orig_owner;
int missed = 0;
@@ -766,7 +787,9 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
* of the lock sleep/wakeup mechanism. When we get a real
* wakeup the task->state is TASK_RUNNING and we change
* saved_state accordingly. If we did not get a real wakeup
- * then we return with the saved state.
+	 * then we return with the saved state. We also need to be
+	 * careful about an original state of TASK_INTERRUPTIBLE, as we
+	 * could otherwise miss a wakeup_interruptible().
*/
saved_state = current->state;
@@ -808,7 +831,8 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
if (adaptive_wait(&waiter, orig_owner)) {
put_task_struct(orig_owner);
- update_current(TASK_UNINTERRUPTIBLE, &saved_state);
+
+ saved_state = rt_set_current_blocked_state(saved_state);
/*
			 * The xchg() in rt_set_current_blocked_state() is an implicit
* barrier which we rely upon to ensure current->state
@@ -823,9 +847,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
current->lock_depth = saved_lock_depth;
}
- state = xchg(&current->state, saved_state);
- if (unlikely(state == TASK_RUNNING))
- current->state = TASK_RUNNING;
+ rt_restore_current_state(saved_state);
/*
* Extremely rare case, if we got woken up by a non-mutex wakeup,
--
1.7.1.1
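
The two helpers added above follow a single save/block/restore pattern:
exchange current->state for TASK_UNINTERRUPTIBLE while waiting for the
lock, let a real (non-rtmutex) wakeup that left TASK_RUNNING behind
override the saved state, and undo the exchange on exit with the same
check. The sketch below is a plain C11 userspace analogue of that
pattern, not kernel code: the task state is modeled as an atomic long,
the TASK_* values and the helper names set_blocked_state() and
restore_state() are illustrative stand-ins, and the schedule()/lock
loop is elided.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative state values, not the kernel's definitions. */
	#define TASK_RUNNING		0
	#define TASK_INTERRUPTIBLE	1
	#define TASK_UNINTERRUPTIBLE	2

	/* Stand-in for current->state. */
	static _Atomic long cur_state = TASK_RUNNING;

	/* Analogue of rt_set_current_blocked_state(): switch to the sleep
	 * state, but if a real wakeup already set TASK_RUNNING, remember
	 * it in saved_state so it is not lost. */
	static long set_blocked_state(long saved_state)
	{
		long state = atomic_exchange(&cur_state, TASK_UNINTERRUPTIBLE);

		if (state == TASK_RUNNING)
			saved_state = TASK_RUNNING;
		return saved_state;
	}

	/* Analogue of rt_restore_current_state(): put the original state
	 * back, again honoring a wakeup that raced in while blocked. */
	static void restore_state(long saved_state)
	{
		long state = atomic_exchange(&cur_state, saved_state);

		if (state == TASK_RUNNING)
			atomic_store(&cur_state, TASK_RUNNING);
	}

	int main(void)
	{
		/* Caller pattern from rt_spin_lock_slowlock(): save the
		 * state once, (re)block inside the wait loop, restore at
		 * the end. */
		long saved_state = atomic_load(&cur_state);

		saved_state = set_blocked_state(saved_state);
		/* ... adaptive_wait()/schedule() until the lock is taken ... */
		restore_state(saved_state);

		printf("final state: %ld\n", (long)atomic_load(&cur_state));
		return 0;
	}

The exchange also serves as the implicit memory barrier the slowpath
comment relies on; in the userspace model that corresponds to the
default sequentially consistent ordering of atomic_exchange().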