| From: Qi Zheng <zhengqi.arch@bytedance.com> |
| Subject: mm: pgtable: remove unnecessary split ptlock for kernel PMD page |
| Date: Thu, 1 Feb 2024 16:05:41 +0800 |
| |
| For kernel PMD entries, we use init_mm.page_table_lock to protect them, |
| so there is no need to allocate and initialize the split ptlock for |
| kernel PMD pages. |
| |
| Link: https://lkml.kernel.org/r/63f0b3d2f9124ae5076963fb5505bd36daba0393.1706774109.git.zhengqi.arch@bytedance.com |
| Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com> |
| Reviewed-by: Muchun Song <muchun.song@linux.dev> |
| Cc: David Hildenbrand <david@redhat.com> |
| Cc: Matthew Wilcox <willy@infradead.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/asm-generic/pgalloc.h | 10 ++++++++-- |
| include/linux/mm.h | 21 ++++++++++++++++----- |
| 2 files changed, 24 insertions(+), 7 deletions(-) |
| |
| --- a/include/asm-generic/pgalloc.h~mm-pgtable-remove-unnecessary-split-ptlock-for-kernel-pmd-page |
| +++ a/include/asm-generic/pgalloc.h |
| @@ -139,7 +139,10 @@ static inline pmd_t *pmd_alloc_one(struc |
| ptdesc = pagetable_alloc(gfp, 0); |
| if (!ptdesc) |
| return NULL; |
| - if (!pagetable_pmd_ctor(ptdesc)) { |
| + |
| + if (mm == &init_mm) { |
| + __pagetable_pmd_ctor(ptdesc); |
| + } else if (!pagetable_pmd_ctor(ptdesc)) { |
| pagetable_free(ptdesc); |
| return NULL; |
| } |
| @@ -153,7 +156,10 @@ static inline void pmd_free(struct mm_st |
| struct ptdesc *ptdesc = virt_to_ptdesc(pmd); |
| |
| BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); |
| - pagetable_pmd_dtor(ptdesc); |
| + if (mm == &init_mm) |
| + __pagetable_pmd_dtor(ptdesc); |
| + else |
| + pagetable_pmd_dtor(ptdesc); |
| pagetable_free(ptdesc); |
| } |
| #endif |
| --- a/include/linux/mm.h~mm-pgtable-remove-unnecessary-split-ptlock-for-kernel-pmd-page |
| +++ a/include/linux/mm.h |
| @@ -3048,26 +3048,37 @@ static inline spinlock_t *pmd_lock(struc |
| return ptl; |
| } |
| |
| -static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) |
| +static inline void __pagetable_pmd_ctor(struct ptdesc *ptdesc) |
| { |
| struct folio *folio = ptdesc_folio(ptdesc); |
| |
| - if (!pmd_ptlock_init(ptdesc)) |
| - return false; |
| __folio_set_pgtable(folio); |
| lruvec_stat_add_folio(folio, NR_PAGETABLE); |
| +} |
| + |
| +static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) |
| +{ |
| + if (!pmd_ptlock_init(ptdesc)) |
| + return false; |
| + |
| + __pagetable_pmd_ctor(ptdesc); |
| return true; |
| } |
| |
| -static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) |
| +static inline void __pagetable_pmd_dtor(struct ptdesc *ptdesc) |
| { |
| struct folio *folio = ptdesc_folio(ptdesc); |
| |
| - pmd_ptlock_free(ptdesc); |
| __folio_clear_pgtable(folio); |
| lruvec_stat_sub_folio(folio, NR_PAGETABLE); |
| } |
| |
| +static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) |
| +{ |
| + pmd_ptlock_free(ptdesc); |
| + __pagetable_pmd_dtor(ptdesc); |
| +} |
| + |
| /* |
| * No scalability reason to split PUD locks yet, but follow the same pattern |
| * as the PMD locks to make it easier if we decide to. The VM should not be |
| _ |