| From 7f5410541b8c89b285cbd28f8d66bd42caf9d81d Mon Sep 17 00:00:00 2001 |
| From: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| Date: Mon, 31 May 2010 12:37:30 +0200 |
| Subject: [PATCH] sched: Fix wake_affine() vs RT tasks |
| |
| commit cc4a826a21dae60fd2c1f03beeb576280a5808ec in tip. |
| |
| Mike reports that since e9e9250b (sched: Scale down cpu_power due to |
| RT tasks), wake_affine() goes funny on RT tasks: they still have a |
| non-zero weight, and wake_affine() still subtracts that weight from |
| the rq weight. |
| |
| Since nobody should be using se->load.weight for RT tasks, set it to |
| zero. Also, since we now use ->cpu_power to normalize rq weights to |
| account for RT cpu usage, add that factor into the imbalance |
| computation. |
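| |
| To illustrate the effect of the cpu_power factor (hypothetical numbers, |
| ignoring the effective_load() terms): take SCHED_LOAD_SCALE == 1024, an |
| imbalance_pct of 125 (adjusted factor 100 + (125 - 100) / 2 == 112), |
| both rq loads at 1024, and this_cpu's cpu_power scaled down to 512 by |
| RT activity while prev_cpu sits at 1024: |
| |
|   old:  100 * 1024        <= 112 * 1024        -> balanced |
|   new:  100 * 1024 * 1024 <= 112 * 512 * 1024  -> not balanced |
| |
| The new check weighs each rq's load by the other cpu's cpu_power, so it |
| sees this_cpu as roughly twice as loaded and no longer allows the |
| affine wakeup onto the CPU whose capacity is being consumed by RT tasks. |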
| |
| [PG: account for sched --> sched_fair code relocations] |
| |
| Reported-by: Mike Galbraith <efault@gmx.de> |
| Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| LKML-Reference: <1275316109.27810.22969.camel@twins> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/kernel/sched.c b/kernel/sched.c |
| index a196eb8..d6e9062 100644 |
| --- a/kernel/sched.c |
| +++ b/kernel/sched.c |
| @@ -555,6 +555,8 @@ struct rq { |
| struct root_domain *rd; |
| struct sched_domain *sd; |
| |
| + unsigned long cpu_power; |
| + |
| unsigned char idle_at_tick; |
| /* For active balancing */ |
| int post_schedule; |
| @@ -1560,24 +1562,9 @@ static unsigned long target_load(int cpu, int type) |
| return max(rq->cpu_load[type-1], total); |
| } |
| |
| -static struct sched_group *group_of(int cpu) |
| -{ |
| - struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd); |
| - |
| - if (!sd) |
| - return NULL; |
| - |
| - return sd->groups; |
| -} |
| - |
| static unsigned long power_of(int cpu) |
| { |
| - struct sched_group *group = group_of(cpu); |
| - |
| - if (!group) |
| - return SCHED_LOAD_SCALE; |
| - |
| - return group->cpu_power; |
| + return cpu_rq(cpu)->cpu_power; |
| } |
| |
| static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
| @@ -1917,8 +1904,8 @@ static void dec_nr_running(struct rq *rq) |
| static void set_load_weight(struct task_struct *p) |
| { |
| if (task_has_rt_policy(p)) { |
| - p->se.load.weight = prio_to_weight[0] * 2; |
| - p->se.load.inv_weight = prio_to_wmult[0] >> 1; |
| + p->se.load.weight = 0; |
| + p->se.load.inv_weight = WMULT_CONST; |
| return; |
| } |
| |
| @@ -8152,6 +8139,7 @@ void __init sched_init(void) |
| #ifdef CONFIG_SMP |
| rq->sd = NULL; |
| rq->rd = NULL; |
| + rq->cpu_power = SCHED_LOAD_SCALE; |
| rq->post_schedule = 0; |
| rq->active_balance = 0; |
| rq->next_balance = jiffies; |
| diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c |
| index 07132f0..1693baf 100644 |
| --- a/kernel/sched_fair.c |
| +++ b/kernel/sched_fair.c |
| @@ -1248,7 +1248,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) |
| unsigned long this_load, load; |
| int idx, this_cpu, prev_cpu; |
| unsigned long tl_per_task; |
| - unsigned int imbalance; |
| struct task_group *tg; |
| unsigned long weight; |
| int balanced; |
| @@ -1287,8 +1286,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) |
| tg = task_group(p); |
| weight = p->se.load.weight; |
| |
| - imbalance = 100 + (sd->imbalance_pct - 100) / 2; |
| - |
| /* |
| * In low-load situations, where prev_cpu is idle and this_cpu is idle |
| * due to the sync cause above having dropped this_load to 0, we'll |
| @@ -1298,9 +1295,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) |
| * Otherwise check if either cpus are near enough in load to allow this |
| * task to be woken on this_cpu. |
| */ |
| - balanced = !this_load || |
| - 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <= |
| - imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); |
| + if (this_load) { |
| + unsigned long this_eff_load, prev_eff_load; |
| + |
| + this_eff_load = 100; |
| + this_eff_load *= power_of(prev_cpu); |
| + this_eff_load *= this_load + |
| + effective_load(tg, this_cpu, weight, weight); |
| + |
| + prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; |
| + prev_eff_load *= power_of(this_cpu); |
| + prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); |
| + |
| + balanced = this_eff_load <= prev_eff_load; |
| + } else |
| + balanced = true; |
| |
| /* |
| * If the currently running task will sleep within |
| @@ -2392,6 +2401,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu) |
| if (!power) |
| power = 1; |
| |
| + cpu_rq(cpu)->cpu_power = power; |
| sdg->cpu_power = power; |
| } |
| |
| -- |
| 1.7.1.1 |
| |