| From ae5e264f2b1fb1e4687636e0c0e0c4720ec6e6af Mon Sep 17 00:00:00 2001 |
| From: Michael Neuling <mikey@neuling.org> |
| Date: Thu, 11 Apr 2019 21:45:59 +1000 |
| Subject: powerpc: Avoid code patching freed init sections |
| |
| commit 51c3c62b58b357e8d35e4cc32f7b4ec907426fe3 upstream. |
| |
| This stops us from doing code patching in init sections after they've |
| been freed. |
| |
| In this chain: |
| kvm_guest_init() -> |
| kvm_use_magic_page() -> |
| fault_in_pages_readable() -> |
| __get_user() -> |
| __get_user_nocheck() -> |
| barrier_nospec(); |
| |
| We have a code patching location at barrier_nospec() and |
| kvm_guest_init() is an init function. This whole chain gets inlined, |
| so when we free the init section (hence kvm_guest_init()), this code |
| goes away and hence should no longer be patched. |
| |
| We have seen this as userspace memory corruption when using a memory |
| checker while doing partition migration testing on powervm (this |
| starts the code patching post migration via |
| /sys/kernel/mobility/migration). In theory, it could also happen when |
| using /sys/kernel/debug/powerpc/barrier_nospec. |
| |
| Cc: stable@vger.kernel.org # 4.13+ |
| Signed-off-by: Michael Neuling <mikey@neuling.org> |
| Reviewed-by: Nicholas Piggin <npiggin@gmail.com> |
| Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr> |
| Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> |
| Signed-off-by: Sasha Levin <sashal@kernel.org> |
| --- |
| arch/powerpc/include/asm/setup.h | 1 + |
| arch/powerpc/lib/code-patching.c | 8 ++++++++ |
| arch/powerpc/mm/mem.c | 2 ++ |
| 3 files changed, 11 insertions(+) |
| |
| diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h |
| index 703ddf752516..709f4e739ae8 100644 |
| --- a/arch/powerpc/include/asm/setup.h |
| +++ b/arch/powerpc/include/asm/setup.h |
| @@ -8,6 +8,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex); |
| |
| extern unsigned int rtas_data; |
| extern unsigned long long memory_limit; |
| +extern bool init_mem_is_free; |
| extern unsigned long klimit; |
| extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); |
| |
| diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c |
| index 753d591f1b52..c77c486fbf24 100644 |
| --- a/arch/powerpc/lib/code-patching.c |
| +++ b/arch/powerpc/lib/code-patching.c |
| @@ -14,12 +14,20 @@ |
| #include <asm/page.h> |
| #include <asm/code-patching.h> |
| #include <asm/uaccess.h> |
| +#include <asm/setup.h> |
| +#include <asm/sections.h> |
| |
| |
| int patch_instruction(unsigned int *addr, unsigned int instr) |
| { |
| int err; |
| |
| + /* Make sure we aren't patching a freed init section */ |
| + if (init_mem_is_free && init_section_contains(addr, 4)) { |
| + pr_debug("Skipping init section patching addr: 0x%px\n", addr); |
| + return 0; |
| + } |
| + |
| __put_user_size(instr, addr, 4, err); |
| if (err) |
| return err; |
| diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c |
| index 5f844337de21..1e93dbc88e80 100644 |
| --- a/arch/powerpc/mm/mem.c |
| +++ b/arch/powerpc/mm/mem.c |
| @@ -62,6 +62,7 @@ |
| #endif |
| |
| unsigned long long memory_limit; |
| +bool init_mem_is_free; |
| |
| #ifdef CONFIG_HIGHMEM |
| pte_t *kmap_pte; |
| @@ -396,6 +397,7 @@ void __init mem_init(void) |
| void free_initmem(void) |
| { |
| ppc_md.progress = ppc_printk_progress; |
| + init_mem_is_free = true; |
| free_initmem_default(POISON_FREE_INITMEM); |
| } |
| |
| -- |
| 2.19.1 |
| |