| From 51c3c62b58b357e8d35e4cc32f7b4ec907426fe3 Mon Sep 17 00:00:00 2001 |
| From: Michael Neuling <mikey@neuling.org> |
| Date: Fri, 14 Sep 2018 11:14:11 +1000 |
| Subject: powerpc: Avoid code patching freed init sections |
| |
| From: Michael Neuling <mikey@neuling.org> |
| |
| commit 51c3c62b58b357e8d35e4cc32f7b4ec907426fe3 upstream. |
| |
| This stops us from doing code patching in init sections after they've |
| been freed. |
| |
| In this chain: |
| kvm_guest_init() -> |
| kvm_use_magic_page() -> |
| fault_in_pages_readable() -> |
| __get_user() -> |
| __get_user_nocheck() -> |
| barrier_nospec(); |
| |
| We have a code patching location at barrier_nospec() and |
| kvm_guest_init() is an init function. This whole chain gets inlined, |
| so when we free the init section (hence kvm_guest_init()), this code |
| goes away and hence should no longer be patched. |
| |
| We have seen this as userspace memory corruption when using a memory |
| checker while doing partition migration testing on powervm (this |
| starts the code patching post migration via |
| /sys/kernel/mobility/migration). In theory, it could also happen when |
| using /sys/kernel/debug/powerpc/barrier_nospec. |
| |
| Cc: stable@vger.kernel.org # 4.13+ |
| Signed-off-by: Michael Neuling <mikey@neuling.org> |
| Reviewed-by: Nicholas Piggin <npiggin@gmail.com> |
| Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr> |
| Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| arch/powerpc/include/asm/setup.h | 1 + |
| arch/powerpc/lib/code-patching.c | 7 +++++++ |
| arch/powerpc/mm/mem.c | 2 ++ |
| 3 files changed, 10 insertions(+) |
| |
| --- a/arch/powerpc/include/asm/setup.h |
| +++ b/arch/powerpc/include/asm/setup.h |
| @@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, |
| |
| extern unsigned int rtas_data; |
| extern unsigned long long memory_limit; |
| +extern bool init_mem_is_free; |
| extern unsigned long klimit; |
| extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); |
| |
| --- a/arch/powerpc/lib/code-patching.c |
| +++ b/arch/powerpc/lib/code-patching.c |
| @@ -22,12 +22,19 @@ |
| #include <asm/page.h> |
| #include <asm/code-patching.h> |
| #include <asm/setup.h> |
| +#include <asm/sections.h> |
| |
| static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, |
| unsigned int *patch_addr) |
| { |
| int err; |
| |
| + /* Make sure we aren't patching a freed init section */ |
| + if (init_mem_is_free && init_section_contains(exec_addr, 4)) { |
| + pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr); |
| + return 0; |
| + } |
| + |
| __put_user_size(instr, patch_addr, 4, err); |
| if (err) |
| return err; |
| --- a/arch/powerpc/mm/mem.c |
| +++ b/arch/powerpc/mm/mem.c |
| @@ -63,6 +63,7 @@ |
| #endif |
| |
| unsigned long long memory_limit; |
| +bool init_mem_is_free; |
| |
| #ifdef CONFIG_HIGHMEM |
| pte_t *kmap_pte; |
| @@ -405,6 +406,7 @@ void free_initmem(void) |
| { |
| ppc_md.progress = ppc_printk_progress; |
| mark_initmem_nx(); |
| + init_mem_is_free = true; |
| free_initmem_default(POISON_FREE_INITMEM); |
| } |
| |