| From: "Matthew Wilcox (Oracle)" <willy@infradead.org> |
| Subject: mm: add pmd_folio() |
| Date: Tue, 26 Mar 2024 20:28:23 +0000 |
| |
Convert directly from a pmd to a folio without going through another
representation first. For now this is just a slightly shorter way to
write it, but it might end up being more efficient later.
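
For example, the two open-coded forms this patch replaces,

	folio = page_folio(pmd_page(*pmd));
	folio = pfn_folio(pmd_pfn(*pmd));

both reduce to:

	folio = pmd_folio(*pmd);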

Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/pgtable.h |    2 ++
 mm/huge_memory.c        |    6 +++---
 mm/madvise.c            |    2 +-
 mm/mempolicy.c          |    2 +-
 mm/mlock.c              |    2 +-
 mm/userfaultfd.c        |    2 +-
 6 files changed, 9 insertions(+), 7 deletions(-)

--- a/include/linux/pgtable.h~mm-add-pmd_folio
+++ a/include/linux/pgtable.h
@@ -50,6 +50,8 @@
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #endif
 
+#define pmd_folio(pmd) page_folio(pmd_page(pmd))
+
 /*
  * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
  *
--- a/mm/huge_memory.c~mm-add-pmd_folio
+++ a/mm/huge_memory.c
@@ -1816,7 +1816,7 @@ bool madvise_free_huge_pmd(struct mmu_ga
 		goto out;
 	}
 
-	folio = pfn_folio(pmd_pfn(orig_pmd));
+	folio = pmd_folio(orig_pmd);
 	/*
 	 * If other processes are mapping this folio, we couldn't discard
 	 * the folio unless they all do MADV_FREE so let's skip the folio.
@@ -2086,7 +2086,7 @@ int change_huge_pmd(struct mmu_gather *t
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
@@ -2663,7 +2663,7 @@ void __split_huge_pmd(struct vm_area_str
 		 * It's safe to call pmd_page when folio is set because it's
 		 * guaranteed that pmd is present.
 		 */
-		if (folio && folio != page_folio(pmd_page(*pmd)))
+		if (folio && folio != pmd_folio(*pmd))
			goto out;
		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 	}
--- a/mm/madvise.c~mm-add-pmd_folio
+++ a/mm/madvise.c
@@ -363,7 +363,7 @@ static int madvise_cold_or_pageout_pte_r
 			goto huge_unlock;
 		}
 
-		folio = pfn_folio(pmd_pfn(orig_pmd));
+		folio = pmd_folio(orig_pmd);
 
 		/* Do not interfere with other mappings of this folio */
 		if (folio_likely_mapped_shared(folio))
--- a/mm/mempolicy.c~mm-add-pmd_folio
+++ a/mm/mempolicy.c
@@ -509,7 +509,7 @@ static void queue_folios_pmd(pmd_t *pmd,
 		qp->nr_failed++;
 		return;
 	}
-	folio = pfn_folio(pmd_pfn(*pmd));
+	folio = pmd_folio(*pmd);
 	if (is_huge_zero_folio(folio)) {
 		walk->action = ACTION_CONTINUE;
 		return;
--- a/mm/mlock.c~mm-add-pmd_folio
+++ a/mm/mlock.c
@@ -378,7 +378,7 @@ static int mlock_pte_range(pmd_t *pmd, u
 			goto out;
 		if (is_huge_zero_pmd(*pmd))
 			goto out;
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_folio(folio);
 		else
--- a/mm/userfaultfd.c~mm-add-pmd_folio
+++ a/mm/userfaultfd.c
@@ -1662,7 +1662,7 @@ ssize_t move_pages(struct userfaultfd_ct
 		/* Check if we can move the pmd without splitting it. */
 		if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
 		    !pmd_none(dst_pmdval)) {
-			struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+			struct folio *folio = pmd_folio(*src_pmd);
 
 			if (!folio || (!is_huge_zero_folio(folio) &&
 				       !PageAnonExclusive(&folio->page))) {
_