| From: Balbir Singh <balbirs@nvidia.com> |
| Subject: mm/migrate_device: add THP splitting during migration |
| Date: Mon, 8 Sep 2025 10:04:43 +1000 |
| |
| Implement migrate_vma_split_pages() to handle THP splitting during the |
| migration process when the destination cannot allocate compound pages. |
| |
| This addresses the common scenario where migrate_vma_setup() succeeds with |
| MIGRATE_PFN_COMPOUND pages, but the destination device cannot allocate |
| large pages during the migration phase. |
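
As an illustration of the driver-side contract (a sketch only, not part of
this patch: my_drv_alloc_dst() and my_drv_alloc_device_page() are made-up
names, and page locking, zone_device_page_init() and error handling are
omitted), a destination-allocation step that takes advantage of the fallback
could look like:

/*
 * Hypothetical driver-side destination allocation.  The driver only sets
 * MIGRATE_PFN_COMPOUND in dst[] when it actually obtained a large device
 * page; otherwise it falls back to order-0 pages and relies on the core
 * code to split the source THP during migration.
 */
static void my_drv_alloc_dst(struct migrate_vma *args)
{
	unsigned long i, j;

	for (i = 0; i < args->npages; ) {
		unsigned long nr = 1;
		struct page *dpage;

		if (!(args->src[i] & MIGRATE_PFN_MIGRATE)) {
			i++;
			continue;
		}

		if (args->src[i] & MIGRATE_PFN_COMPOUND) {
			nr = HPAGE_PMD_NR;
			dpage = my_drv_alloc_device_page(HPAGE_PMD_ORDER);
			if (dpage) {
				args->dst[i] = migrate_pfn(page_to_pfn(dpage)) |
					       MIGRATE_PFN_COMPOUND;
				i += nr;
				continue;
			}
			/*
			 * No large device page available: fall back to
			 * order-0 pages for the whole range and let the
			 * core split the source THP.
			 */
		}

		for (j = 0; j < nr; j++) {
			dpage = my_drv_alloc_device_page(0);
			if (!dpage)
				break;
			args->dst[i + j] = migrate_pfn(page_to_pfn(dpage));
		}
		i += nr;
	}
}

With this in place the driver never has to fail the whole range just because
a 2MB device allocation did not succeed.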
| |
| Key changes: |
| - migrate_vma_split_pages(): Split already-isolated pages during migration |
| - Add an "unmapped" parameter to __folio_split() and |
|   __split_huge_page_to_list_to_order() so that folios whose pages are |
|   already unmapped (migration entries) skip the redundant unmap/remap steps |
| |
| This provides a fallback mechanism to ensure migration succeeds even when |
| large page allocation fails at the destination. |
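
To make the fallback concrete, the bookkeeping on the source pfn array boils
down to the following standalone restatement of part of
migrate_vma_split_pages() from the mm/migrate_device.c hunk below
(fan_out_src_entries() is only an illustrative name):

/*
 * After a successful split, the PMD-sized entry at @idx becomes
 * HPAGE_PMD_NR order-0 entries that keep their flag bits (for example
 * MIGRATE_PFN_MIGRATE) but lose MIGRATE_PFN_COMPOUND.
 */
static void fan_out_src_entries(struct migrate_vma *migrate, unsigned long idx)
{
	unsigned long pfn, flags, i;

	migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
	flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
	pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		migrate->src[idx + i] = migrate_pfn(pfn + i) | flags;
}

Each of the resulting order-0 entries is then migrated individually by
__migrate_device_pages().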
| |
| Link: https://lkml.kernel.org/r/20250908000448.180088-11-balbirs@nvidia.com |
| Signed-off-by: Balbir Singh <balbirs@nvidia.com> |
| Cc: David Hildenbrand <david@redhat.com> |
| Cc: Zi Yan <ziy@nvidia.com> |
| Cc: Joshua Hahn <joshua.hahnjy@gmail.com> |
| Cc: Rakie Kim <rakie.kim@sk.com> |
| Cc: Byungchul Park <byungchul@sk.com> |
| Cc: Gregory Price <gourry@gourry.net> |
| Cc: Ying Huang <ying.huang@linux.alibaba.com> |
| Cc: Alistair Popple <apopple@nvidia.com> |
| Cc: Oscar Salvador <osalvador@suse.de> |
| Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> |
| Cc: Baolin Wang <baolin.wang@linux.alibaba.com> |
| Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com> |
| Cc: Nico Pache <npache@redhat.com> |
| Cc: Ryan Roberts <ryan.roberts@arm.com> |
| Cc: Dev Jain <dev.jain@arm.com> |
| Cc: Barry Song <baohua@kernel.org> |
| Cc: Lyude Paul <lyude@redhat.com> |
| Cc: Danilo Krummrich <dakr@kernel.org> |
| Cc: David Airlie <airlied@gmail.com> |
| Cc: Simona Vetter <simona@ffwll.ch> |
| Cc: Ralph Campbell <rcampbell@nvidia.com> |
| Cc: Mika Penttilä <mpenttil@redhat.com> |
| Cc: Matthew Brost <matthew.brost@intel.com> |
| Cc: Francois Dugast <francois.dugast@intel.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/huge_mm.h | 11 ++++-- |
| lib/test_hmm.c | 9 ++++ |
| mm/huge_memory.c | 45 +++++++++++++----------- |
| mm/migrate_device.c | 69 +++++++++++++++++++++++++++++++------- |
| 4 files changed, 100 insertions(+), 34 deletions(-) |
| |
| --- a/include/linux/huge_mm.h~mm-migrate_device-add-thp-splitting-during-migration |
| +++ a/include/linux/huge_mm.h |
| @@ -365,8 +365,8 @@ unsigned long thp_get_unmapped_area_vmfl |
| vm_flags_t vm_flags); |
| |
| bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins); |
| -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| - unsigned int new_order); |
| +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| + unsigned int new_order, bool unmapped); |
| int min_order_for_split(struct folio *folio); |
| int split_folio_to_list(struct folio *folio, struct list_head *list); |
| bool uniform_split_supported(struct folio *folio, unsigned int new_order, |
| @@ -375,6 +375,13 @@ bool non_uniform_split_supported(struct |
| bool warns); |
| int folio_split(struct folio *folio, unsigned int new_order, struct page *page, |
| struct list_head *list); |
| + |
| +static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| + unsigned int new_order) |
| +{ |
| + return __split_huge_page_to_list_to_order(page, list, new_order, false); |
| +} |
| + |
| /* |
| * try_folio_split - try to split a @folio at @page using non uniform split. |
| * @folio: folio to be split |
| --- a/lib/test_hmm.c~mm-migrate_device-add-thp-splitting-during-migration |
| +++ a/lib/test_hmm.c |
| @@ -1612,6 +1612,15 @@ static vm_fault_t dmirror_devmem_fault(s |
| nr = 1 << order; |
| |
| /* |
| + * When folios are partially mapped, we can't rely on the folio |
| + * order of vmf->page as the folio might not be fully split yet |
| + */ |
| + if (vmf->pte) { |
| + order = 0; |
| + nr = 1; |
| + } |
| + |
| + /* |
| * Consider a per-cpu cache of src and dst pfns, but with |
| * large number of cpus that might not scale well. |
| */ |
| --- a/mm/huge_memory.c~mm-migrate_device-add-thp-splitting-during-migration |
| +++ a/mm/huge_memory.c |
| @@ -3459,15 +3459,6 @@ static void __split_folio_to_order(struc |
| new_folio->mapping = folio->mapping; |
| new_folio->index = folio->index + i; |
| |
| - /* |
| - * page->private should not be set in tail pages. Fix up and warn once |
| - * if private is unexpectedly set. |
| - */ |
| - if (unlikely(new_folio->private)) { |
| - VM_WARN_ON_ONCE_PAGE(true, new_head); |
| - new_folio->private = NULL; |
| - } |
| - |
| if (folio_test_swapcache(folio)) |
| new_folio->swap.val = folio->swap.val + i; |
| |
| @@ -3696,6 +3687,7 @@ bool uniform_split_supported(struct foli |
| * @lock_at: a page within @folio to be left locked to caller |
| * @list: after-split folios will be put on it if non NULL |
| * @uniform_split: perform uniform split or not (non-uniform split) |
| + * @unmapped: The pages are already unmapped, they are migration entries. |
| * |
| * It calls __split_unmapped_folio() to perform uniform and non-uniform split. |
| * It is in charge of checking whether the split is supported or not and |
| @@ -3711,7 +3703,7 @@ bool uniform_split_supported(struct foli |
| */ |
| static int __folio_split(struct folio *folio, unsigned int new_order, |
| struct page *split_at, struct page *lock_at, |
| - struct list_head *list, bool uniform_split) |
| + struct list_head *list, bool uniform_split, bool unmapped) |
| { |
| struct deferred_split *ds_queue = get_deferred_split_queue(folio); |
| XA_STATE(xas, &folio->mapping->i_pages, folio->index); |
| @@ -3761,13 +3753,15 @@ static int __folio_split(struct folio *f |
| * is taken to serialise against parallel split or collapse |
| * operations. |
| */ |
| - anon_vma = folio_get_anon_vma(folio); |
| - if (!anon_vma) { |
| - ret = -EBUSY; |
| - goto out; |
| + if (!unmapped) { |
| + anon_vma = folio_get_anon_vma(folio); |
| + if (!anon_vma) { |
| + ret = -EBUSY; |
| + goto out; |
| + } |
| + anon_vma_lock_write(anon_vma); |
| } |
| mapping = NULL; |
| - anon_vma_lock_write(anon_vma); |
| } else { |
| unsigned int min_order; |
| gfp_t gfp; |
| @@ -3834,7 +3828,8 @@ static int __folio_split(struct folio *f |
| goto out_unlock; |
| } |
| |
| - unmap_folio(folio); |
| + if (!unmapped) |
| + unmap_folio(folio); |
| |
| /* block interrupt reentry in xa_lock and spinlock */ |
| local_irq_disable(); |
| @@ -3920,10 +3915,13 @@ static int __folio_split(struct folio *f |
| |
| next = folio_next(new_folio); |
| |
| + zone_device_private_split_cb(folio, new_folio); |
| + |
| expected_refs = folio_expected_ref_count(new_folio) + 1; |
| folio_ref_unfreeze(new_folio, expected_refs); |
| |
| - lru_add_split_folio(folio, new_folio, lruvec, list); |
| + if (!unmapped) |
| + lru_add_split_folio(folio, new_folio, lruvec, list); |
| |
| /* |
| * Anonymous folio with swap cache. |
| @@ -3955,6 +3953,7 @@ static int __folio_split(struct folio *f |
| folio_put_refs(new_folio, nr_pages); |
| } |
| |
| + zone_device_private_split_cb(folio, NULL); |
| /* |
| * Unfreeze @folio only after all page cache entries, which |
| * used to point to it, have been updated with new folios. |
| @@ -3978,6 +3977,9 @@ fail: |
| |
| local_irq_enable(); |
| |
| + if (unmapped) |
| + return ret; |
| + |
| if (nr_shmem_dropped) |
| shmem_uncharge(mapping->host, nr_shmem_dropped); |
| |
| @@ -4068,12 +4070,13 @@ out: |
| * Returns -EINVAL when trying to split to an order that is incompatible |
| * with the folio. Splitting to order 0 is compatible with all folios. |
| */ |
| -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| - unsigned int new_order) |
| +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list, |
| + unsigned int new_order, bool unmapped) |
| { |
| struct folio *folio = page_folio(page); |
| |
| - return __folio_split(folio, new_order, &folio->page, page, list, true); |
| + return __folio_split(folio, new_order, &folio->page, page, list, true, |
| + unmapped); |
| } |
| |
| /* |
| @@ -4102,7 +4105,7 @@ int folio_split(struct folio *folio, uns |
| struct page *split_at, struct list_head *list) |
| { |
| return __folio_split(folio, new_order, split_at, &folio->page, list, |
| - false); |
| + false, false); |
| } |
| |
| int min_order_for_split(struct folio *folio) |
| --- a/mm/migrate_device.c~mm-migrate_device-add-thp-splitting-during-migration |
| +++ a/mm/migrate_device.c |
| @@ -898,6 +898,29 @@ abort: |
| src[i] &= ~MIGRATE_PFN_MIGRATE; |
| return 0; |
| } |
| + |
| +static int migrate_vma_split_pages(struct migrate_vma *migrate, |
| + unsigned long idx, unsigned long addr, |
| + struct folio *folio) |
| +{ |
| + unsigned long i; |
| + unsigned long pfn; |
| + unsigned long flags; |
| + int ret = 0; |
| + |
| + folio_get(folio); |
| + split_huge_pmd_address(migrate->vma, addr, true); |
| + ret = __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL, |
| + 0, true); |
| + if (ret) |
| + return ret; |
| + migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND; |
| + flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1); |
| + pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT; |
| + for (i = 1; i < HPAGE_PMD_NR; i++) |
| + migrate->src[i+idx] = migrate_pfn(pfn + i) | flags; |
| + return ret; |
| +} |
| #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */ |
| static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate, |
| unsigned long addr, |
| @@ -907,6 +930,13 @@ static int migrate_vma_insert_huge_pmd_p |
| { |
| return 0; |
| } |
| + |
| +static int migrate_vma_split_pages(struct migrate_vma *migrate, |
| + unsigned long idx, unsigned long addr, |
| + struct folio *folio) |
| +{ |
| + return 0; |
| +} |
| #endif |
| |
| /* |
| @@ -1056,8 +1086,9 @@ static void __migrate_device_pages(unsig |
| struct migrate_vma *migrate) |
| { |
| struct mmu_notifier_range range; |
| - unsigned long i; |
| + unsigned long i, j; |
| bool notified = false; |
| + unsigned long addr; |
| |
| for (i = 0; i < npages; ) { |
| struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); |
| @@ -1099,12 +1130,16 @@ static void __migrate_device_pages(unsig |
| (!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) { |
| nr = HPAGE_PMD_NR; |
| src_pfns[i] &= ~MIGRATE_PFN_COMPOUND; |
| - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; |
| - goto next; |
| + } else { |
| + nr = 1; |
| } |
| |
| - migrate_vma_insert_page(migrate, addr, &dst_pfns[i], |
| - &src_pfns[i]); |
| + for (j = 0; j < nr && i + j < npages; j++) { |
| + src_pfns[i+j] |= MIGRATE_PFN_MIGRATE; |
| + migrate_vma_insert_page(migrate, |
| + addr + j * PAGE_SIZE, |
| + &dst_pfns[i+j], &src_pfns[i+j]); |
| + } |
| goto next; |
| } |
| |
| @@ -1126,7 +1161,14 @@ static void __migrate_device_pages(unsig |
| MIGRATE_PFN_COMPOUND); |
| goto next; |
| } |
| - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; |
| + nr = 1 << folio_order(folio); |
| + addr = migrate->start + i * PAGE_SIZE; |
| + if (migrate_vma_split_pages(migrate, i, addr, |
| + folio)) { |
| + src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE | |
| + MIGRATE_PFN_COMPOUND); |
| + goto next; |
| + } |
| } else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) && |
| (dst_pfns[i] & MIGRATE_PFN_COMPOUND) && |
| !(src_pfns[i] & MIGRATE_PFN_COMPOUND)) { |
| @@ -1162,11 +1204,16 @@ static void __migrate_device_pages(unsig |
| |
| if (migrate && migrate->fault_page == page) |
| extra_cnt = 1; |
| - r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt); |
| - if (r) |
| - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; |
| - else |
| - folio_migrate_flags(newfolio, folio); |
| + for (j = 0; j < nr && i + j < npages; j++) { |
| + folio = page_folio(migrate_pfn_to_page(src_pfns[i+j])); |
| + newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j])); |
| + |
| + r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt); |
| + if (r) |
| + src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE; |
| + else |
| + folio_migrate_flags(newfolio, folio); |
| + } |
| next: |
| i += nr; |
| } |
| _ |