From 799fcf4ba0a279c3bc66c89d08635dc4ec5a344e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 26 Jul 2009 18:51:45 +0200
Subject: [PATCH] sched: implement wake functions for the priority boosting code

commit 2b56f06587c2fc6a093271e95bd8151059e719a3 in tip.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
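
Note: the patch funnels all wakeups through a single try_to_wake_up() that now
takes an extra "mutex" argument.  The new wake_up_process_mutex*() helpers pass
mutex=1, so the woken task is left in TASK_RUNNING_MUTEX (a state bit assumed
to be defined elsewhere in the -rt series) instead of TASK_RUNNING, while the
*_sync() variants pass 1 in wake_flags as a synchronous-wakeup hint.  Below is
a minimal sketch of how a priority-boosting lock hand-off might call the new
API; the function and variable names are illustrative assumptions, not code
from this patch:

    /* Illustrative sketch only -- not part of this patch. */
    #include <linux/sched.h>

    /*
     * Hand a PI-boosted lock to the waiter that will take it over:
     * wake_up_process_mutex() ends up in try_to_wake_up(p, TASK_ALL, 0, 1),
     * so the woken task is marked TASK_RUNNING_MUTEX, not TASK_RUNNING.
     */
    static void example_hand_off_boosted_lock(struct task_struct *next_owner)
    {
            wake_up_process_mutex(next_owner);
    }
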
 include/linux/sched.h |    3 +++
 kernel/sched.c        |   46 +++++++++++++++++++++++++++++++++++-----------
 2 files changed, 38 insertions(+), 11 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f0cc03..2ef88d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2032,6 +2032,9 @@ extern void do_timer(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_mutex(struct task_struct * tsk);
+extern int wake_up_process_sync(struct task_struct * tsk);
+extern int wake_up_process_mutex_sync(struct task_struct * tsk);
 extern void wake_up_new_task(struct task_struct *tsk,
                                                 unsigned long clone_flags);
 #ifdef CONFIG_SMP
diff --git a/kernel/sched.c b/kernel/sched.c
index b611e03..b962381 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2368,8 +2368,9 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(struct task_struct *p, unsigned int state,
-                          int wake_flags)
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state,
+               int wake_flags, int mutex)
 {
         int cpu, orig_cpu, this_cpu, success = 0;
         unsigned long flags;
@@ -2483,7 +2484,10 @@ out_running:
         trace_sched_wakeup(rq, p, success);
         check_preempt_curr(rq, p, wake_flags);
 
-        p->state = TASK_RUNNING;
+        if (mutex)
+                p->state = TASK_RUNNING_MUTEX;
+        else
+                p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
         if (p->sched_class->task_woken)
                 p->sched_class->task_woken(rq, p);
@@ -2519,13 +2523,31 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-        return try_to_wake_up(p, TASK_ALL, 0);
+        return try_to_wake_up(p, TASK_ALL, 0, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
+int wake_up_process_sync(struct task_struct * p)
+{
+        return try_to_wake_up(p, TASK_ALL, 1, 0);
+}
+EXPORT_SYMBOL(wake_up_process_sync);
+
+int wake_up_process_mutex(struct task_struct * p)
+{
+        return try_to_wake_up(p, TASK_ALL, 0, 1);
+}
+EXPORT_SYMBOL(wake_up_process_mutex);
+
+int wake_up_process_mutex_sync(struct task_struct * p)
+{
+        return try_to_wake_up(p, TASK_ALL, 1, 1);
+}
+EXPORT_SYMBOL(wake_up_process_mutex_sync);
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
-        return try_to_wake_up(p, state, 0);
+        return try_to_wake_up(p, state | TASK_RUNNING_MUTEX, 0, 0);
 }
 
 /*
@@ -3737,7 +3759,8 @@ need_resched_nonpreemptible:
         update_rq_clock(rq);
         clear_tsk_need_resched(prev);
 
-        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+        if ((prev->state & ~TASK_RUNNING_MUTEX) &&
+            !(preempt_count() & PREEMPT_ACTIVE)) {
                 if (unlikely(signal_pending_state(prev->state, prev)))
                         prev->state = TASK_RUNNING;
                 else
@@ -3938,7 +3961,8 @@ asmlinkage void __sched preempt_schedule_irq(void)
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                           void *key)
 {
-        return try_to_wake_up(curr->private, mode, wake_flags);
+        return try_to_wake_up(curr->private, mode | TASK_RUNNING_MUTEX,
+                              wake_flags, 0);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -3981,7 +4005,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode,
         unsigned long flags;
 
         spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, 0, key);
+        __wake_up_common(q, mode, nr_exclusive, 1, key);
         spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(__wake_up);
@@ -4061,7 +4085,7 @@ void complete(struct completion *x)
 
         spin_lock_irqsave(&x->wait.lock, flags);
         x->done++;
-        __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
+        __wake_up_common(&x->wait, TASK_NORMAL, 1, 1, NULL);
         spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4081,7 +4105,7 @@ void complete_all(struct completion *x)
 
         spin_lock_irqsave(&x->wait.lock, flags);
         x->done += UINT_MAX/2;
-        __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
+        __wake_up_common(&x->wait, TASK_NORMAL, 0, 1, NULL);
         spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -5396,7 +5420,7 @@ static inline void sched_init_granularity(void)
         update_sysctl();
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)
 /*
  * This is how migration works:
  *
--
1.7.0.4
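
A note on the state handling above: once wake_up_process_mutex() has marked a
task TASK_RUNNING_MUTEX, schedule() no longer deactivates it, because the
deactivation test now masks that bit out of prev->state; wake_up_state() and
default_wake_function() OR the bit into the state mask they hand to
try_to_wake_up(), so an ordinary wakeup still reaches such a task.  The toy
program below (plain userspace C, with an assumed bit value -- the real
definition lives elsewhere in the -rt series) only illustrates that masking
logic:

    #include <stdio.h>

    #define TASK_INTERRUPTIBLE      1
    #define TASK_RUNNING_MUTEX      (1 << 4)  /* assumed, illustrative value */

    int main(void)
    {
            /* state as left by the wake_up_process_mutex() path */
            unsigned long state = TASK_RUNNING_MUTEX;

            /* schedule(): deactivate only if a bit other than
             * TASK_RUNNING_MUTEX is set -- here none is, so stay runnable */
            if (state & ~TASK_RUNNING_MUTEX)
                    printf("would be deactivated\n");
            else
                    printf("treated as runnable\n");

            /* wake_up_state()/default_wake_function(): widen the mask so a
             * normal wakeup also matches a TASK_RUNNING_MUTEX task */
            unsigned long mask = TASK_INTERRUPTIBLE | TASK_RUNNING_MUTEX;
            printf("normal wakeup matches: %s\n", (state & mask) ? "yes" : "no");

            return 0;
    }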