From 7e369dafe08913a97f069c04c7a68b3044e4fdfd Mon Sep 17 00:00:00 2001
From: Gregory Haskins <ghaskins@novell.com>
Date: Fri, 3 Jul 2009 08:44:19 -0500
Subject: [PATCH] rtmutex: Add lateral lock stealing

commit 74804766938d20010e8fd3a2a5f906f27ca62a90 in tip.

The current logic only allows lock stealing to occur if the current task
is of higher priority than the pending owner. We can gain significant
throughput improvements (200%+) by allowing the lock-stealing code to
include tasks of equal priority. The theory is that the system will make
faster progress by allowing the task already on the CPU to take the lock
rather than waiting for the system to wake up a different task.

This does add a degree of unfairness, but note that the users of these
locks under non-rt environments have already been using unfair raw
spinlocks anyway, so the tradeoff is probably worth it.

The way I like to think of this is that higher priority tasks should
clearly preempt, and lower priority tasks should clearly block. However,
if tasks have an identical priority value, then we can treat the
scheduler's decision as the tie-breaking parameter (e.g. tasks that the
scheduler picked to run first have a logically higher priority among tasks
of the same prio). This helps to keep the system "primed" with tasks doing
useful work, and the end result is higher throughput.

Thanks to Steven Rostedt for pointing out that RT tasks should be excluded
to prevent the introduction of an unnatural, unbounded latency.
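
In code terms, the stealing policy boils down to a single predicate. The
sketch below is a behaviorally equivalent simplification of the
lock_is_stealable() helper added further down (recall that a numerically
lower ->prio means a higher priority):

  /*
   * May 'task' steal the lock from the pending owner 'pendowner'?
   *
   * STEAL_NORMAL:  only a strictly higher-priority task may steal.
   * STEAL_LATERAL: equal priority is also sufficient, except for RT
   *                tasks, which are held to the strict test to keep
   *                their latencies bounded.
   */
  static inline int lock_is_stealable(struct task_struct *task,
                                      struct task_struct *pendowner, int mode)
  {
          if (mode == STEAL_NORMAL || rt_task(task))
                  return task->prio < pendowner->prio;

          return task->prio <= pendowner->prio;
  }

Only the rt_spin_lock_slowlock() spinlock slow path passes STEAL_LATERAL;
the regular rtmutex paths keep using STEAL_NORMAL.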

[ Steven Rostedt - removed config option to disable ]

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fe9b2fc..3a4e820 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -318,7 +318,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  * lock yet]:
  */
 static inline int try_to_steal_lock(struct rt_mutex *lock,
-                                    struct task_struct *task)
+                                    struct task_struct *task, int mode)
 {
         struct task_struct *pendowner = rt_mutex_owner(lock);
         struct rt_mutex_waiter *next;
@@ -330,7 +330,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
                 return 1;

         raw_spin_lock(&pendowner->pi_lock);
-        if (task->prio >= pendowner->prio) {
+        if (!lock_is_stealable(task, pendowner, mode)) {
                 raw_spin_unlock(&pendowner->pi_lock);
                 return 0;
         }
@@ -383,7 +383,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock)
+static int do_try_to_take_rt_mutex(struct rt_mutex *lock, int mode)
 {
         /*
          * We have to be careful here if the atomic speedups are
@@ -406,7 +406,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
          */
         mark_rt_mutex_waiters(lock);

-        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
+        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current, mode))
                 return 0;

         /* We got the lock. */
@@ -419,6 +419,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
         return 1;
 }

+static inline int try_to_take_rt_mutex(struct rt_mutex *lock)
+{
+        return do_try_to_take_rt_mutex(lock, STEAL_NORMAL);
+}
+
 /*
  * Task blocks on lock.
  *
@@ -674,7 +679,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
         init_lists(lock);

         /* Try to acquire the lock again: */
-        if (try_to_take_rt_mutex(lock)) {
+        if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) {
                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                 return;
         }
@@ -696,7 +701,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
                 int saved_lock_depth = current->lock_depth;

                 /* Try to acquire the lock */
-                if (try_to_take_rt_mutex(lock))
+                if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL))
                         break;
                 /*
                  * waiter.task is NULL the first time we come here and
@@ -1381,7 +1386,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,

         mark_rt_mutex_waiters(lock);

-        if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
+        if (!rt_mutex_owner(lock) ||
+            try_to_steal_lock(lock, task, STEAL_NORMAL)) {
                 /* We got the lock for task. */
                 debug_rt_mutex_lock(lock);
                 rt_mutex_set_owner(lock, task, 0);
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 97a2f81..4df690c 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -129,6 +129,26 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                                       struct rt_mutex_waiter *waiter,
                                       int detect_deadlock);

+
+#define STEAL_LATERAL 1
+#define STEAL_NORMAL 0
+
+/*
+ * Note that RT tasks are excluded from lateral-steals to prevent the
+ * introduction of an unbounded latency
+ */
+static inline int lock_is_stealable(struct task_struct *task,
+                                    struct task_struct *pendowner, int mode)
+{
+        if (mode == STEAL_NORMAL || rt_task(task)) {
+                if (task->prio >= pendowner->prio)
+                        return 0;
+        } else if (task->prio > pendowner->prio)
+                return 0;
+
+        return 1;
+}
+
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
 #else
--
1.7.1.1
