| From 75a3203c35965f5569f845d690a2e061ab1a4b46 Mon Sep 17 00:00:00 2001 |
| From: Catalin Marinas <catalin.marinas@arm.com> |
| Date: Fri, 5 Apr 2019 18:38:49 -0700 |
| Subject: kmemleak: powerpc: skip scanning holes in the .bss section |
| |
| [ Upstream commit 298a32b132087550d3fa80641ca58323c5dfd4d9 ] |
| |
| Commit 2d4f567103ff ("KVM: PPC: Introduce kvm_tmp framework") adds |
| kvm_tmp[] into the .bss section and then frees the rest of the unused |
| space back to the page allocator. |
| |
| kernel_init |
| kvm_guest_init |
| kvm_free_tmp |
| free_reserved_area |
| free_unref_page |
| free_unref_page_prepare |
| |
| With DEBUG_PAGEALLOC=y, it will unmap those pages from the kernel. As |
| a result, the kmemleak scan will trigger a panic when it scans the |
| .bss section with unmapped pages. |
| |
| This patch creates dedicated kmemleak objects for the .data, .bss and |
| potentially .data..ro_after_init sections to allow partial freeing via |
| the kmemleak_free_part() in the powerpc kvm_free_tmp() function. |
| |
| Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com |
| Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> |
| Reported-by: Qian Cai <cai@lca.pw> |
| Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc) |
| Tested-by: Qian Cai <cai@lca.pw> |
| Cc: Paul Mackerras <paulus@samba.org> |
| Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| Cc: Avi Kivity <avi@redhat.com> |
| Cc: Paolo Bonzini <pbonzini@redhat.com> |
| Cc: Radim Krcmar <rkrcmar@redhat.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org> |
| --- |
| arch/powerpc/kernel/kvm.c | 7 +++++++ |
| mm/kmemleak.c | 16 +++++++++++----- |
| 2 files changed, 18 insertions(+), 5 deletions(-) |
| |
| diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c |
| index 683b5b3805bd..cd381e2291df 100644 |
| --- a/arch/powerpc/kernel/kvm.c |
| +++ b/arch/powerpc/kernel/kvm.c |
| @@ -22,6 +22,7 @@ |
| #include <linux/kvm_host.h> |
| #include <linux/init.h> |
| #include <linux/export.h> |
| +#include <linux/kmemleak.h> |
| #include <linux/kvm_para.h> |
| #include <linux/slab.h> |
| #include <linux/of.h> |
| @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void) |
| |
| static __init void kvm_free_tmp(void) |
| { |
| + /* |
| + * Inform kmemleak about the hole in the .bss section since the |
| + * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. |
| + */ |
| + kmemleak_free_part(&kvm_tmp[kvm_tmp_index], |
| + ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); |
| free_reserved_area(&kvm_tmp[kvm_tmp_index], |
| &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); |
| } |
| diff --git a/mm/kmemleak.c b/mm/kmemleak.c |
| index 707fa5579f66..6c318f5ac234 100644 |
| --- a/mm/kmemleak.c |
| +++ b/mm/kmemleak.c |
| @@ -1529,11 +1529,6 @@ static void kmemleak_scan(void) |
| } |
| rcu_read_unlock(); |
| |
| - /* data/bss scanning */ |
| - scan_large_block(_sdata, _edata); |
| - scan_large_block(__bss_start, __bss_stop); |
| - scan_large_block(__start_ro_after_init, __end_ro_after_init); |
| - |
| #ifdef CONFIG_SMP |
| /* per-cpu sections scanning */ |
| for_each_possible_cpu(i) |
| @@ -2071,6 +2066,17 @@ void __init kmemleak_init(void) |
| } |
| local_irq_restore(flags); |
| |
| + /* register the data/bss sections */ |
| + create_object((unsigned long)_sdata, _edata - _sdata, |
| + KMEMLEAK_GREY, GFP_ATOMIC); |
| + create_object((unsigned long)__bss_start, __bss_stop - __bss_start, |
| + KMEMLEAK_GREY, GFP_ATOMIC); |
| + /* only register .data..ro_after_init if not within .data */ |
| + if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) |
| + create_object((unsigned long)__start_ro_after_init, |
| + __end_ro_after_init - __start_ro_after_init, |
| + KMEMLEAK_GREY, GFP_ATOMIC); |
| + |
| /* |
| * This is the point where tracking allocations is safe. Automatic |
| * scanning is started during the late initcall. Add the early logged |
| -- |
| 2.20.1 |
| |