| From 88c62d921bbb9b93c431e920e0719473e9bfca42 Mon Sep 17 00:00:00 2001 |
| From: Oleg Nesterov <oleg@redhat.com> |
| Date: Mon, 15 Mar 2010 10:10:14 +0100 |
| Subject: [PATCH] sched: move_task_off_dead_cpu(): Remove retry logic |
| |
| commit 2d5a7c7241f20ee8e90452a47b773cffbb4c2412 in tip. |
| |
| The previous patch preserved the retry logic, but it looks unneeded. |
| |
| __migrate_task() can only fail if we raced with migration after we dropped |
| the lock, but in this case the caller of set_cpus_allowed/etc must initiate |
| migration itself if ->on_rq == T. |
| |
| We already fixed p->cpus_allowed; the changes in the active/online masks |
| must be visible to the racer, which should migrate the task to an online |
| cpu correctly. |
| |
| [ upstream commit: c1804d547dc098363443667609c272d1e4d15ee8 ] |
| |
| [PG: warning; this differs from upstream by the critical change: |
| - needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING); |
| + needs_cpu = (task_cpu(p) == dead_cpu) && !(p->state & TASK_WAKING); |
| which makes a big difference when on RT, TASK_WAKING is not exclusive.] |
| |
| Signed-off-by: Oleg Nesterov <oleg@redhat.com> |
| Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| LKML-Reference: <20100315091014.GA9138@redhat.com> |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/kernel/sched.c b/kernel/sched.c |
| index 30e59b6..dfc2d66 100644 |
| --- a/kernel/sched.c |
| +++ b/kernel/sched.c |
| @@ -7720,7 +7720,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
| struct rq *rq = cpu_rq(dead_cpu); |
| int needs_cpu, uninitialized_var(dest_cpu); |
| unsigned long flags; |
| -again: |
| + |
| local_irq_save(flags); |
| |
| raw_spin_lock(&rq->lock); |
| @@ -7728,14 +7728,13 @@ again: |
| if (needs_cpu) |
| dest_cpu = select_fallback_rq(dead_cpu, p); |
| raw_spin_unlock(&rq->lock); |
| - |
| - /* It can have affinity changed while we were choosing. */ |
| + /* |
| + * It can only fail if we race with set_cpus_allowed(), |
| + * in the racer should migrate the task anyway. |
| + */ |
| if (needs_cpu) |
| - needs_cpu = !__migrate_task(p, dead_cpu, dest_cpu); |
| + __migrate_task(p, dead_cpu, dest_cpu); |
| local_irq_restore(flags); |
| - |
| - if (unlikely(needs_cpu)) |
| - goto again; |
| } |
| |
| /* |
| -- |
| 1.7.1.1 |
| |