| From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com> |
| Subject: mm: convert ptlock_alloc() to use ptdescs |
| Date: Mon, 7 Aug 2023 16:04:47 -0700 |
| |
Convert ptlock_alloc() to take a struct ptdesc instead of a struct page.
This removes some direct accesses to struct page, working towards
splitting out struct ptdesc from struct page (a sketch of the ptdesc
conversion helpers follows the diffstat below).

Link: https://lkml.kernel.org/r/20230807230513.102486-6-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guo Ren <guoren@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/mm.h |    6 +++---
 mm/memory.c        |    4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

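For context: struct ptdesc, introduced earlier in this series, shares its
storage with struct page, so converting between the two is a cast rather
than a table lookup. A minimal sketch of the conversion helpers (the
in-tree versions add _Generic const handling plus static_asserts that the
field offsets line up; only the names page_ptdesc/ptdesc_page are taken
from the series, the rest is illustrative):

        /* struct ptdesc overlays struct page, so conversion is a cast. */
        #define page_ptdesc(p)  ((struct ptdesc *)(p))
        #define ptdesc_page(pt) ((struct page *)(pt))

Because the conversion is free, ptlock_alloc() can change signature while
callers keep passing the same underlying memory.
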
--- a/include/linux/mm.h~mm-convert-ptlock_alloc-to-use-ptdescs
+++ a/include/linux/mm.h
@@ -2826,7 +2826,7 @@ static inline void pagetable_free(struct
 #if USE_SPLIT_PTE_PTLOCKS
 #if ALLOC_SPLIT_PTLOCKS
 void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
 extern void ptlock_free(struct page *page);
 
 static inline spinlock_t *ptlock_ptr(struct page *page)
@@ -2838,7 +2838,7 @@ static inline void ptlock_cache_init(voi
 {
 }
 
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
 {
         return true;
 }
@@ -2868,7 +2868,7 @@ static inline bool ptlock_init(struct pa
          * slab code uses page->slab_cache, which shares storage with page->ptl.
          */
         VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
-        if (!ptlock_alloc(page))
+        if (!ptlock_alloc(page_ptdesc(page)))
                 return false;
         spin_lock_init(ptlock_ptr(page));
         return true;
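
Note that ptlock_init() still takes a struct page here and converts at the
call site; later patches in the series push ptdesc out through the callers.
A sketch of the fully converted helper, assuming ptlock_ptr() and
ptlock_free() have been converted as well (illustrative, not part of this
patch):

        static inline bool ptlock_init(struct ptdesc *ptdesc)
        {
                /* ptl shares storage with other fields and must still be
                 * zero from page allocation when we get here. */
                VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl,
                               ptdesc_page(ptdesc));
                if (!ptlock_alloc(ptdesc))
                        return false;
                spin_lock_init(ptlock_ptr(ptdesc));
                return true;
        }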
--- a/mm/memory.c~mm-convert-ptlock_alloc-to-use-ptdescs
+++ a/mm/memory.c
@@ -6114,14 +6114,14 @@ void __init ptlock_cache_init(void)
                         SLAB_PANIC, NULL);
 }
 
-bool ptlock_alloc(struct page *page)
+bool ptlock_alloc(struct ptdesc *ptdesc)
 {
         spinlock_t *ptl;
 
         ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
         if (!ptl)
                 return false;
-        page->ptl = ptl;
+        ptdesc->ptl = ptl;
         return true;
 }
 
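The matching free path releases the same pointer. ptlock_free() is
converted by a later patch in this series; a sketch of the counterpart,
shown here only to illustrate the symmetry:

        void ptlock_free(struct ptdesc *ptdesc)
        {
                kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
        }
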
_