From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:27 -0500
Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
Instead of playing with non-preemption, introduce explicit
startup serialization. This is more robust and cleaner as
well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/stop_machine.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -137,6 +137,7 @@ void stop_one_cpu_nowait(unsigned int cp
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
@@ -155,14 +156,13 @@ static void queue_stop_cpus_work(const s
}
/*
- * Disable preemption while queueing to avoid getting
- * preempted by a stopper which might wait for other stoppers
- * to enter @fn which can lead to deadlock.
+ * Make sure that all work is queued on all cpus before
+ * any of the cpus can execute it.
*/
- preempt_disable();
+ mutex_lock(&stopper_lock);
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
- preempt_enable();
+ mutex_unlock(&stopper_lock);
}
static int __stop_cpus(const struct cpumask *cpumask,
@@ -279,6 +279,16 @@ repeat:
struct cpu_stop_done *done = work->done;
char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
+ /*
+ * Wait until the stopper has finished scheduling on all
+ * cpus
+ */
+ mutex_lock(&stopper_lock);
+ /*
+ * Let other cpu threads continue as well
+ */
+ mutex_unlock(&stopper_lock);
+
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
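
The pattern the patch introduces boils down to: the queueing side holds stopper_lock across queueing work on every cpu, and each stopper thread takes and immediately drops the same lock before running its callback, so no stopper can start before all of them have work queued. Below is a minimal user-space sketch of that idea, assuming pthreads in place of the kernel's cpu_stop machinery; the names queue_lock, work_queued, worker and NR_WORKERS are invented for the illustration and are not part of the patch.

/*
 * Illustrative user-space analogue of the stopper_lock serialization
 * above; not kernel code.  queue_lock plays the role of stopper_lock,
 * the worker threads play the role of the per-cpu stopper threads.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS	4

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_queued[NR_WORKERS];

static void *worker(void *arg)
{
	int id = *(int *)arg;

	/* Spin until our own work item has been queued. */
	while (!__atomic_load_n(&work_queued[id], __ATOMIC_ACQUIRE))
		;

	/*
	 * Wait until the queueing side has finished queueing work for
	 * all workers, then let the other workers continue as well.
	 */
	pthread_mutex_lock(&queue_lock);
	pthread_mutex_unlock(&queue_lock);

	printf("worker %d runs only after all work was queued\n", id);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_WORKERS];
	int ids[NR_WORKERS], i;

	for (i = 0; i < NR_WORKERS; i++) {
		ids[i] = i;
		pthread_create(&threads[i], NULL, worker, &ids[i]);
	}

	/*
	 * Queue work for all workers before any of them may execute it,
	 * mirroring queue_stop_cpus_work() under stopper_lock.
	 */
	pthread_mutex_lock(&queue_lock);
	for (i = 0; i < NR_WORKERS; i++)
		__atomic_store_n(&work_queued[i], 1, __ATOMIC_RELEASE);
	pthread_mutex_unlock(&queue_lock);

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

The lock/unlock pair in the worker looks like a no-op, but it acts as a barrier: the mutex only becomes available once the queueing loop has released it, i.e. once work is queued everywhere, which is why the kernel side can replace the old preempt_disable() trick with stopper_lock and stay preemptible on PREEMPT_RT.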