From 6a942f5780545ebd11aca8b3ac4b163397962322 Mon Sep 17 00:00:00 2001
From: Valentin Schneider <valentin.schneider@arm.com>
Date: Wed, 7 Jul 2021 17:33:38 +0100
Subject: s390: preempt: Fix preempt_count initialization

From: Valentin Schneider <valentin.schneider@arm.com>

commit 6a942f5780545ebd11aca8b3ac4b163397962322 upstream.

S390's init_idle_preempt_count(p, cpu) doesn't actually let us initialize
the preempt_count of the requested CPU's idle task: it unconditionally
writes to the current CPU's. This clearly conflicts with
idle_threads_init(), which intends to initialize *all* the idle tasks,
including their preempt_count (or their CPU's, if the arch uses a per-CPU
preempt_count).
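
For reference, the generic bringup path this conflicts with looks roughly
like the below (a simplified sketch of kernel/smpboot.c; idle_init()
eventually reaches init_idle() and thus init_idle_preempt_count(p, cpu)).
Every possible CPU's idle task is set up from the boot CPU, so a macro
that writes to the *current* CPU's lowcore seeds the boot CPU's
preempt_count over and over and never touches anyone else's:

  void __init idle_threads_init(void)
  {
  	unsigned int cpu, boot_cpu = smp_processor_id();

  	for_each_possible_cpu(cpu) {
  		if (cpu != boot_cpu)
  			idle_init(cpu);	/* -> fork_idle() -> init_idle() */
  	}
  }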

Unfortunately, the way s390 does things doesn't let us initialize every
possible CPU's preempt_count early on: the count lives in the CPU's
lowcore, whose pages are only allocated when that CPU is brought up and
are freed again when it is brought down.
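
For comparison, the generic definition keeps the count in the task's
thread_info, which is why idle_threads_init() can initialize a remote
CPU's idle task from anywhere; roughly (per
include/asm-generic/preempt.h at the time of this commit):

  #define init_idle_preempt_count(p, cpu) do { \
  	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
  } while (0)

s390 has no such per-task home for the count, hence the approach below.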

Let the arch-specific code set a CPU's preempt_count when its lowcore is
allocated, and turn init_idle_preempt_count() into an empty stub.

Fixes: f1a0a376ca0c ("sched/core: Initialize the idle task with preemption disabled")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Link: https://lore.kernel.org/r/20210707163338.1623014-1-valentin.schneider@arm.com
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/s390/include/asm/preempt.h |   16 ++++------------
 arch/s390/kernel/setup.c        |    1 +
 arch/s390/kernel/smp.c          |    1 +
 3 files changed, 6 insertions(+), 12 deletions(-)

--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -29,12 +29,6 @@ static inline void preempt_count_set(int
 				  old, new) != old);
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
@@ -88,12 +82,6 @@ static inline void preempt_count_set(int
 	S390_lowcore.preempt_count = pc;
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 }
@@ -130,6 +118,10 @@ static inline bool should_resched(int pr
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
+#define init_task_preempt_count(p)	do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu)	do { } while (0)
+
 #ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -454,6 +454,7 @@ static void __init setup_lowcore_dat_off
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -215,6 +215,7 @@ static int pcpu_alloc_lowcore(struct pcp
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 	if (nmi_alloc_per_cpu(lc))
 		goto out_async;
 	if (vdso_alloc_per_cpu(lc))