| From 7c3df123e248a20ae083d51e720c415d52b092c9 Mon Sep 17 00:00:00 2001 |
| From: Gregory Haskins <ghaskins@novell.com> |
| Date: Fri, 3 Jul 2009 08:44:20 -0500 |
| Subject: [PATCH] rtmutex: Rearrange the code |
| |
| commit 090e2db9b77d03d264b0a12048448ae49a907e0c in tip. |
| |
| The current logic makes rather coarse adjustments to current->state since |
| it is planning on sleeping anyway. We want to eventually move to an |
| adaptive (e.g. optional sleep) algorithm, so we tighten the scope of the |
| adjustments to bracket the schedule(). This should yield correct behavior |
| with or without the adaptive features that are added later in the series. |
| We add it here as a separate patch for greater review clarity on smaller |
| changes. |
| |
| Signed-off-by: Gregory Haskins <ghaskins@novell.com> |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c |
| index 3a4e820..543e43c 100644 |
| --- a/kernel/rtmutex.c |
| +++ b/kernel/rtmutex.c |
| @@ -656,6 +656,14 @@ rt_spin_lock_fastunlock(struct rt_mutex *lock, |
| slowfn(lock); |
| } |
| |
| +static inline void |
| +update_current(unsigned long new_state, unsigned long *saved_state) |
| +{ |
| + unsigned long state = xchg(&current->state, new_state); |
| + if (unlikely(state == TASK_RUNNING)) |
| + *saved_state = TASK_RUNNING; |
| +} |
| + |
| /* |
| * Slow path lock function spin_lock style: this variant is very |
| * careful not to miss any non-lock wakeups. |
| @@ -695,7 +703,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| * saved_state accordingly. If we did not get a real wakeup |
| * then we return with the saved state. |
| */ |
| - saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE); |
| + saved_state = current->state; |
| |
| for (;;) { |
| int saved_lock_depth = current->lock_depth; |
| @@ -725,13 +733,14 @@ rt_spin_lock_slowlock(struct rt_mutex *lock) |
| |
| debug_rt_mutex_print_deadlock(&waiter); |
| |
| - schedule_rt_mutex(lock); |
| + update_current(TASK_UNINTERRUPTIBLE, &saved_state); |
| + if (waiter.task) |
| + schedule_rt_mutex(lock); |
| + else |
| + update_current(TASK_RUNNING_MUTEX, &saved_state); |
| |
| raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| current->lock_depth = saved_lock_depth; |
| - state = xchg(&current->state, TASK_UNINTERRUPTIBLE); |
| - if (unlikely(state == TASK_RUNNING)) |
| - saved_state = TASK_RUNNING; |
| } |
| |
| state = xchg(&current->state, saved_state); |
| -- |
| 1.7.1.1 |
| |