From 993967ecc4b7685ee320b50fdc3d04267d9bd9a0 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 21 Feb 2010 19:23:36 +0100
Subject: [PATCH] sched: Extend activate_task to allow queueing to the head of a list
commit babe95bad86cba3843cb53d1cee8ac39c491a64a in tip.
The ability to enqueue a task at the head of a SCHED_FIFO priority
list is required to fix some violations of the POSIX scheduling
policy.

Extend activate_task() with a "head" argument and fix up all callers.
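For illustration, a minimal userspace sketch of the semantics the new
"head" flag selects (not part of this patch; the prio_list type and
enqueue() helper below are hypothetical stand-ins): head queueing
mirrors list_add() and makes the task the next candidate at its
priority, while tail queueing mirrors list_add_tail() and preserves
round-robin order among peers.

	#include <stdio.h>

	#define NR_TASKS 8

	/* Hypothetical stand-in for one priority level's run list. */
	struct prio_list {
		int task[NR_TASKS];	/* task ids; index 0 is picked next */
		int nr;
	};

	/* head != 0 mirrors list_add(), head == 0 mirrors list_add_tail(). */
	static void enqueue(struct prio_list *l, int id, int head)
	{
		int i;

		if (head) {
			for (i = l->nr; i > 0; i--)
				l->task[i] = l->task[i - 1];
			l->task[0] = id;
		} else {
			l->task[l->nr] = id;
		}
		l->nr++;
	}

	int main(void)
	{
		int i;
		struct prio_list l = { .nr = 0 };

		enqueue(&l, 1, 0);	/* tail: 1     */
		enqueue(&l, 2, 0);	/* tail: 1 2   */
		enqueue(&l, 3, 1);	/* head: 3 1 2 */

		for (i = 0; i < l.nr; i++)
			printf("%d ", l.task[i]);
		printf("\n");	/* prints "3 1 2": task 3 runs next */
		return 0;
	}

In the patch itself the flag simply propagates from activate_task()
to enqueue_task(); every existing caller passes false, so behavior is
unchanged until a later change passes true.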
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c260f0..abb9f14 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1967,12 +1967,13 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
/*
* activate_task - move a task to the runqueue.
*/
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+activate_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
- enqueue_task(rq, p, wakeup, false);
+ enqueue_task(rq, p, wakeup, head);
inc_nr_running(rq);
}
@@ -2524,7 +2525,7 @@ out_activate:
schedstat_inc(p, se.nr_wakeups_local);
else
schedstat_inc(p, se.nr_wakeups_remote);
- activate_task(rq, p, 1);
+ activate_task(rq, p, 1, false);
success = 1;
/*
@@ -2791,7 +2792,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
BUG_ON(p->state != TASK_WAKING);
p->state = TASK_RUNNING;
update_rq_clock(rq);
- activate_task(rq, p, 0);
+ activate_task(rq, p, 0, false);
trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
@@ -4868,7 +4869,7 @@ recheck:
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq) {
- activate_task(rq, p, 0);
+ activate_task(rq, p, 0, false);
check_class_changed(rq, p, prev_class, oldprio, running);
}
@@ -5781,7 +5782,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);
set_task_cpu(p, dest_cpu);
- activate_task(rq_dest, p, 0);
+ activate_task(rq_dest, p, 0, false);
check_preempt_curr(rq_dest, p, 0);
}
done:
@@ -5949,7 +5950,7 @@ void sched_idle_next(void)
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
- activate_task(rq, p, 0);
+ activate_task(rq, p, 0, false);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -8251,7 +8252,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
deactivate_task(rq, p, 0);
__setscheduler(rq, p, SCHED_NORMAL, 0);
if (on_rq) {
- activate_task(rq, p, 0);
+ activate_task(rq, p, 0, false);
resched_task(rq->curr);
}
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4881995..4c47b93 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1828,7 +1828,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
{
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
- activate_task(this_rq, p, 0);
+ activate_task(this_rq, p, 0, false);
check_preempt_curr(this_rq, p, 0);
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 365430c..216c101 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1424,7 +1424,7 @@ static int push_rt_task(struct rq *rq)
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
- activate_task(lowest_rq, next_task, 0);
+ activate_task(lowest_rq, next_task, 0, false);
resched_task(lowest_rq->curr);
@@ -1507,7 +1507,7 @@ static int pull_rt_task(struct rq *this_rq)
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
- activate_task(this_rq, p, 0);
+ activate_task(this_rq, p, 0, false);
/*
* We continue with the search, just in
* case there's an even higher prio task
--
1.7.1.1