From 3d4ccca8d248e7d89507266a9aad7e005b19daef Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 16 Mar 2010 14:31:44 -0700
Subject: [PATCH] sched: Break out from load_balancing on rq_lock contention

commit 5d2740b70e7f6ad29104aec72956fb6e4d143809 in tip.
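
On CONFIG_PREEMPT kernels, lower sysctl_sched_nr_migrate from 32 to 8
and break out of the balance_tasks() / load_balance_fair() loops as
soon as either runqueue lock is contended.  Balancing runs with IRQs
disabled and both rq locks held, so a long balance run translates
directly into scheduling latency; NEWIDLE balancing likewise stops
once the first task has been pulled, since it is itself a source of
latency.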

[PG: account for sched --> sched_fair code moves and that
the lock break in move_tasks is now upstream as baa8c110]

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
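---
[Not part of the commit: a minimal userspace sketch of the lock-break
pattern the hunks below add.  raw_spin_is_contended() is kernel-only,
so the atomic rq_waiters counter here is a hypothetical stand-in for
it, NR_MIGRATE mirrors the PREEMPT value of sysctl_sched_nr_migrate,
and lock_rq()/balance() are illustrative glue, not kernel code.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_MIGRATE	8	/* the PREEMPT value of sysctl_sched_nr_migrate */

static pthread_spinlock_t rq_lock;
static atomic_int rq_waiters;		/* stand-in for raw_spin_is_contended() */
static int queued = 64, pulled;

static void lock_rq(void)
{
	atomic_fetch_add(&rq_waiters, 1);	/* announce that we are spinning */
	pthread_spin_lock(&rq_lock);
	atomic_fetch_sub(&rq_waiters, 1);
}

/* Pull up to NR_MIGRATE tasks, but give up the lock early on contention. */
static void *balance(void *arg)
{
	int loops = 0;

	(void)arg;
	lock_rq();
	while (queued > 0 && loops++ < NR_MIGRATE) {
		queued--;			/* "pull" one task */
		pulled++;

		if (atomic_load(&rq_waiters) > 0)
			break;			/* lock break: let the waiter in */
	}
	pthread_spin_unlock(&rq_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&rq_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, balance, NULL);
	pthread_create(&b, NULL, balance, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("pulled %d tasks, %d left queued\n", pulled, queued);
	return 0;
}

[Whichever thread loses the race bumps rq_waiters while it spins, so
the lock holder typically pulls one task and bails out rather than
draining the whole batch; that early exit is the latency bound the
patch is after.]

 kernel/sched.c      |    4 ++++
 kernel/sched_fair.c |   18 ++++++++++++++++++
 2 files changed, 22 insertions(+), 0 deletions(-)
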
diff --git a/kernel/sched.c b/kernel/sched.c
index 429d251..a196eb8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -813,7 +813,11 @@ late_initcall(sched_init_debug);
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
+#ifndef CONFIG_PREEMPT
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif
 
 /*
  * ratelimit for updating the group shares.
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4c47b93..07132f0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1954,6 +1954,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 */
 		if (idle == CPU_NEWLY_IDLE)
 			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
 #endif
 
 		/*
@@ -2022,6 +2026,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
 			break;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
+#endif
 	}
 	rcu_read_unlock();
 
-- 
1.7.1.1