| From: Alexander Potapenko <glider@google.com> |
| Subject: mm: kmsan: call KMSAN hooks from SLUB code |
| Date: Thu, 15 Sep 2022 17:03:49 +0200 |
| |
| In order to report uninitialized memory coming from heap allocations, KMSAN |
| has to poison them unless they're created with __GFP_ZERO. |
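| |
| For example (hypothetical snippet, do_something() is a placeholder), KMSAN |
| will now report the first read below but not the second: |
| |
| 	int *a = kmalloc(sizeof(int), GFP_KERNEL); |
| 	int *b = kmalloc(sizeof(int), GFP_KERNEL | __GFP_ZERO); |
| |
| 	if (*a)	/* KMSAN report: the object was poisoned at allocation */ |
| 		do_something(); |
| 	if (*b)	/* no report: __GFP_ZERO memory is unpoisoned */ |
| 		do_something(); |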
| |
| Conveniently, the KMSAN hooks are needed in exactly the places where |
| init_on_alloc/init_on_free initialization is already performed, as the |
| simplified excerpt below shows. |
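| |
| A paraphrased (not verbatim) view of the call site in slab_post_alloc_hook() |
| from the mm/slab.h hunk below, with the zero-initialization condition |
| abbreviated: |
| |
| 	for (i = 0; i < size; i++) { |
| 		/* init_on_alloc zeroing happens here... */ |
| 		if (init) |
| 			memset(p[i], 0, s->object_size); |
| 		kmemleak_alloc_recursive(p[i], s->object_size, 1, |
| 					 s->flags, flags); |
| 		/* ...so the new hook goes right next to it. */ |
| 		kmsan_slab_alloc(s, p[i], flags); |
| 	} |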
| |
| In addition, we apply __no_kmsan_checks to get_freepointer_safe() to |
| suppress reports when accessing freelist pointers that reside in freed |
| objects. |
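| |
| A minimal (hypothetical) sketch of what the annotation does: |
| |
| 	/* |
| 	 * Loads inside an annotated function are not checked, and its |
| 	 * return value is treated as initialized. |
| 	 */ |
| 	__no_kmsan_checks |
| 	static void *peek_pointer(void **slot) |
| 	{ |
| 		return *slot;	/* no report even if *slot is poisoned */ |
| 	} |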
| |
| Link: https://lkml.kernel.org/r/20220915150417.722975-16-glider@google.com |
| Signed-off-by: Alexander Potapenko <glider@google.com> |
| Reviewed-by: Marco Elver <elver@google.com> |
| Cc: Alexander Viro <viro@zeniv.linux.org.uk> |
| Cc: Alexei Starovoitov <ast@kernel.org> |
| Cc: Andrey Konovalov <andreyknvl@gmail.com> |
| Cc: Andrey Konovalov <andreyknvl@google.com> |
| Cc: Andy Lutomirski <luto@kernel.org> |
| Cc: Arnd Bergmann <arnd@arndb.de> |
| Cc: Borislav Petkov <bp@alien8.de> |
| Cc: Christoph Hellwig <hch@lst.de> |
| Cc: Christoph Lameter <cl@linux.com> |
| Cc: David Rientjes <rientjes@google.com> |
| Cc: Dmitry Vyukov <dvyukov@google.com> |
| Cc: Eric Biggers <ebiggers@google.com> |
| Cc: Eric Biggers <ebiggers@kernel.org> |
| Cc: Eric Dumazet <edumazet@google.com> |
| Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| Cc: Herbert Xu <herbert@gondor.apana.org.au> |
| Cc: Ilya Leoshkevich <iii@linux.ibm.com> |
| Cc: Ingo Molnar <mingo@redhat.com> |
| Cc: Jens Axboe <axboe@kernel.dk> |
| Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> |
| Cc: Kees Cook <keescook@chromium.org> |
| Cc: Mark Rutland <mark.rutland@arm.com> |
| Cc: Matthew Wilcox <willy@infradead.org> |
| Cc: Michael S. Tsirkin <mst@redhat.com> |
| Cc: Pekka Enberg <penberg@kernel.org> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Cc: Petr Mladek <pmladek@suse.com> |
| Cc: Stephen Rothwell <sfr@canb.auug.org.au> |
| Cc: Steven Rostedt <rostedt@goodmis.org> |
| Cc: Thomas Gleixner <tglx@linutronix.de> |
| Cc: Vasily Gorbik <gor@linux.ibm.com> |
| Cc: Vegard Nossum <vegard.nossum@oracle.com> |
| Cc: Vlastimil Babka <vbabka@suse.cz> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/kmsan.h | 57 ++++++++++++++++++++++++++++++ |
| mm/kmsan/hooks.c | 76 ++++++++++++++++++++++++++++++++++++++++ |
| mm/slab.h | 1 |
| mm/slub.c | 17 ++++++++ |
| 4 files changed, 151 insertions(+) |
| |
| --- a/include/linux/kmsan.h~mm-kmsan-call-kmsan-hooks-from-slub-code |
| +++ a/include/linux/kmsan.h |
| @@ -14,6 +14,7 @@ |
| #include <linux/types.h> |
| |
| struct page; |
| +struct kmem_cache; |
| |
| #ifdef CONFIG_KMSAN |
| |
| @@ -49,6 +50,44 @@ void kmsan_free_page(struct page *page, |
| void kmsan_copy_page_meta(struct page *dst, struct page *src); |
| |
| /** |
| + * kmsan_slab_alloc() - Notify KMSAN about a slab allocation. |
| + * @s: slab cache the object belongs to. |
| + * @object: object pointer. |
| + * @flags: GFP flags passed to the allocator. |
| + * |
| + * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the |
| + * newly created object, marking it as initialized or uninitialized. |
| + */ |
| +void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); |
| + |
| +/** |
| + * kmsan_slab_free() - Notify KMSAN about a slab deallocation. |
| + * @s: slab cache the object belongs to. |
| + * @object: object pointer. |
| + * |
| + * KMSAN marks the freed object as uninitialized. |
| + */ |
| +void kmsan_slab_free(struct kmem_cache *s, void *object); |
| + |
| +/** |
| + * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation. |
| + * @ptr: object pointer. |
| + * @size: object size. |
| + * @flags: GFP flags passed to the allocator. |
| + * |
| + * Similar to kmsan_slab_alloc(), but for large allocations. |
| + */ |
| +void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); |
| + |
| +/** |
| + * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation. |
| + * @ptr: object pointer. |
| + * |
| + * Similar to kmsan_slab_free(), but for large allocations. |
| + */ |
| +void kmsan_kfree_large(const void *ptr); |
| + |
| +/** |
| * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap. |
| * @start: start of vmapped range. |
| * @end: end of vmapped range. |
| @@ -114,6 +153,24 @@ static inline void kmsan_copy_page_meta( |
| { |
| } |
| |
| +static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object, |
| +				    gfp_t flags) |
| +{ |
| +} |
| + |
| +static inline void kmsan_slab_free(struct kmem_cache *s, void *object) |
| +{ |
| +} |
| + |
| +static inline void kmsan_kmalloc_large(const void *ptr, size_t size, |
| +				       gfp_t flags) |
| +{ |
| +} |
| + |
| +static inline void kmsan_kfree_large(const void *ptr) |
| +{ |
| +} |
| + |
| static inline void kmsan_vmap_pages_range_noflush(unsigned long start, |
| 						  unsigned long end, |
| 						  pgprot_t prot, |
| --- a/mm/kmsan/hooks.c~mm-kmsan-call-kmsan-hooks-from-slub-code |
| +++ a/mm/kmsan/hooks.c |
| @@ -27,6 +27,82 @@ |
| * skipping effects of functions like memset() inside instrumented code. |
| */ |
| |
| +void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) |
| +{ |
| +	if (unlikely(object == NULL)) |
| +		return; |
| +	if (!kmsan_enabled || kmsan_in_runtime()) |
| +		return; |
| +	/* |
| +	 * There's a ctor or this is an RCU cache - do nothing. The memory |
| +	 * status hasn't changed since last use. |
| +	 */ |
| +	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU)) |
| +		return; |
| + |
| +	kmsan_enter_runtime(); |
| +	if (flags & __GFP_ZERO) |
| +		kmsan_internal_unpoison_memory(object, s->object_size, |
| +					       KMSAN_POISON_CHECK); |
| +	else |
| +		kmsan_internal_poison_memory(object, s->object_size, flags, |
| +					     KMSAN_POISON_CHECK); |
| +	kmsan_leave_runtime(); |
| +} |
| + |
| +void kmsan_slab_free(struct kmem_cache *s, void *object) |
| +{ |
| +	if (!kmsan_enabled || kmsan_in_runtime()) |
| +		return; |
| + |
| +	/* RCU slabs could be legally used after free within the RCU period */ |
| +	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))) |
| +		return; |
| +	/* |
| +	 * If there's a constructor, freed memory must remain in the same state |
| +	 * until the next allocation. We cannot save its state to detect |
| +	 * use-after-free bugs; instead we just keep it unpoisoned. |
| +	 */ |
| +	if (s->ctor) |
| +		return; |
| +	kmsan_enter_runtime(); |
| +	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL, |
| +				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE); |
| +	kmsan_leave_runtime(); |
| +} |
| + |
| +void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
| +{ |
| +	if (unlikely(ptr == NULL)) |
| +		return; |
| +	if (!kmsan_enabled || kmsan_in_runtime()) |
| +		return; |
| +	kmsan_enter_runtime(); |
| +	if (flags & __GFP_ZERO) |
| +		kmsan_internal_unpoison_memory((void *)ptr, size, |
| +					       /*checked*/ true); |
| +	else |
| +		kmsan_internal_poison_memory((void *)ptr, size, flags, |
| +					     KMSAN_POISON_CHECK); |
| +	kmsan_leave_runtime(); |
| +} |
| + |
| +void kmsan_kfree_large(const void *ptr) |
| +{ |
| +	struct page *page; |
| + |
| +	if (!kmsan_enabled || kmsan_in_runtime()) |
| +		return; |
| +	kmsan_enter_runtime(); |
| +	page = virt_to_head_page((void *)ptr); |
| +	KMSAN_WARN_ON(ptr != page_address(page)); |
| +	kmsan_internal_poison_memory((void *)ptr, |
| +				     PAGE_SIZE << compound_order(page), |
| +				     GFP_KERNEL, |
| +				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE); |
| +	kmsan_leave_runtime(); |
| +} |
| + |
| static unsigned long vmalloc_shadow(unsigned long addr) |
| { |
| 	return (unsigned long)kmsan_get_metadata((void *)addr, |
| --- a/mm/slab.h~mm-kmsan-call-kmsan-hooks-from-slub-code |
| +++ a/mm/slab.h |
| @@ -729,6 +729,7 @@ static inline void slab_post_alloc_hook( |
| 			memset(p[i], 0, s->object_size); |
| 		kmemleak_alloc_recursive(p[i], s->object_size, 1, |
| 					 s->flags, flags); |
| +		kmsan_slab_alloc(s, p[i], flags); |
| 	} |
| |
| 	memcg_slab_post_alloc_hook(s, objcg, flags, size, p); |
| --- a/mm/slub.c~mm-kmsan-call-kmsan-hooks-from-slub-code |
| +++ a/mm/slub.c |
| @@ -22,6 +22,7 @@ |
| #include <linux/proc_fs.h> |
| #include <linux/seq_file.h> |
| #include <linux/kasan.h> |
| +#include <linux/kmsan.h> |
| #include <linux/cpu.h> |
| #include <linux/cpuset.h> |
| #include <linux/mempolicy.h> |
| @@ -359,6 +360,17 @@ static void prefetch_freepointer(const s |
| 	prefetchw(object + s->offset); |
| } |
| |
| +/* |
| + * When running under KMSAN, get_freepointer_safe() may return an uninitialized |
| + * pointer value if the current thread loses the race for the next |
| + * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in |
| + * slab_alloc_node() will fail, so the uninitialized value won't be used, but |
| + * KMSAN will still check all arguments of cmpxchg because of imperfect |
| + * handling of inline assembly. |
| + * To work around this problem, we apply __no_kmsan_checks to ensure that |
| + * get_freepointer_safe() returns initialized memory. |
| + */ |
| +__no_kmsan_checks |
| static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) |
| { |
| 	unsigned long freepointer_addr; |
| @@ -1709,6 +1721,7 @@ static inline void *kmalloc_large_node_h |
| 	ptr = kasan_kmalloc_large(ptr, size, flags); |
| 	/* As ptr might get tagged, call kmemleak hook after KASAN. */ |
| 	kmemleak_alloc(ptr, size, 1, flags); |
| +	kmsan_kmalloc_large(ptr, size, flags); |
| 	return ptr; |
| } |
| |
| @@ -1716,12 +1729,14 @@ static __always_inline void kfree_hook(v |
| { |
| 	kmemleak_free(x); |
| 	kasan_kfree_large(x); |
| +	kmsan_kfree_large(x); |
| } |
| |
| static __always_inline bool slab_free_hook(struct kmem_cache *s, |
| 					   void *x, bool init) |
| { |
| 	kmemleak_free_recursive(x, s->flags); |
| +	kmsan_slab_free(s, x); |
| |
| 	debug_check_no_locks_freed(x, s->object_size); |
| |
| @@ -5941,6 +5956,7 @@ static char *create_unique_id(struct kme |
| 	p += sprintf(p, "%07u", s->size); |
| |
| 	BUG_ON(p > name + ID_STR_LENGTH - 1); |
| +	kmsan_unpoison_memory(name, p - name); |
| 	return name; |
| } |
| |
| @@ -6042,6 +6058,7 @@ static int sysfs_slab_alias(struct kmem_ |
| 	al->name = name; |
| 	al->next = alias_list; |
| 	alias_list = al; |
| +	kmsan_unpoison_memory(al, sizeof(*al)); |
| 	return 0; |
| } |
| |
| _ |