| From: Suren Baghdasaryan <surenb@google.com> |
| Subject: userfaultfd: fix PTE unmapping stack-allocated PTE copies |
| Date: Wed, 26 Feb 2025 10:55:09 -0800 |
| |
| Current implementation of move_pages_pte() copies source and destination |
| PTEs in order to detect concurrent changes to PTEs involved in the move. |
| However these copies are also used to unmap the PTEs, which will fail if |
| CONFIG_HIGHPTE is enabled because the copies are allocated on the stack. |
| Fix this by using the actual PTEs which were kmap()ed. |
| |
| Link: https://lkml.kernel.org/r/20250226185510.2732648-3-surenb@google.com |
| Fixes: adef440691ba ("userfaultfd: UFFDIO_MOVE uABI") |
| Signed-off-by: Suren Baghdasaryan <surenb@google.com> |
| Reported-by: Peter Xu <peterx@redhat.com> |
| Reviewed-by: Peter Xu <peterx@redhat.com> |
| Cc: Andrea Arcangeli <aarcange@redhat.com> |
| Cc: Barry Song <21cnbao@gmail.com> |
| Cc: Barry Song <v-songbaohua@oppo.com> |
| Cc: David Hildenbrand <david@redhat.com> |
| Cc: Hugh Dickins <hughd@google.com> |
| Cc: Jann Horn <jannh@google.com> |
| Cc: Kalesh Singh <kaleshsingh@google.com> |
| Cc: Liam R. Howlett <Liam.Howlett@Oracle.com> |
| Cc: Lokesh Gidra <lokeshgidra@google.com> |
| Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> |
| Cc: Matthew Wilcox (Oracle) <willy@infradead.org> |
| Cc: <stable@vger.kernel.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| mm/userfaultfd.c | 20 ++++++++++---------- |
| 1 file changed, 10 insertions(+), 10 deletions(-) |
| |
| --- a/mm/userfaultfd.c~userfaultfd-fix-pte-unmapping-stack-allocated-pte-copies |
| +++ a/mm/userfaultfd.c |
| @@ -1290,8 +1290,8 @@ retry: |
| spin_unlock(src_ptl); |
| |
| if (!locked) { |
| - pte_unmap(&orig_src_pte); |
| - pte_unmap(&orig_dst_pte); |
| + pte_unmap(src_pte); |
| + pte_unmap(dst_pte); |
| src_pte = dst_pte = NULL; |
| /* now we can block and wait */ |
| folio_lock(src_folio); |
| @@ -1307,8 +1307,8 @@ retry: |
| /* at this point we have src_folio locked */ |
| if (folio_test_large(src_folio)) { |
| /* split_folio() can block */ |
| - pte_unmap(&orig_src_pte); |
| - pte_unmap(&orig_dst_pte); |
| + pte_unmap(src_pte); |
| + pte_unmap(dst_pte); |
| src_pte = dst_pte = NULL; |
| err = split_folio(src_folio); |
| if (err) |
| @@ -1333,8 +1333,8 @@ retry: |
| goto out; |
| } |
| if (!anon_vma_trylock_write(src_anon_vma)) { |
| - pte_unmap(&orig_src_pte); |
| - pte_unmap(&orig_dst_pte); |
| + pte_unmap(src_pte); |
| + pte_unmap(dst_pte); |
| src_pte = dst_pte = NULL; |
| /* now we can block and wait */ |
| anon_vma_lock_write(src_anon_vma); |
| @@ -1352,8 +1352,8 @@ retry: |
| entry = pte_to_swp_entry(orig_src_pte); |
| if (non_swap_entry(entry)) { |
| if (is_migration_entry(entry)) { |
| - pte_unmap(&orig_src_pte); |
| - pte_unmap(&orig_dst_pte); |
| + pte_unmap(src_pte); |
| + pte_unmap(dst_pte); |
| src_pte = dst_pte = NULL; |
| migration_entry_wait(mm, src_pmd, src_addr); |
| err = -EAGAIN; |
| @@ -1396,8 +1396,8 @@ retry: |
| src_folio = folio; |
| src_folio_pte = orig_src_pte; |
| if (!folio_trylock(src_folio)) { |
| - pte_unmap(&orig_src_pte); |
| - pte_unmap(&orig_dst_pte); |
| + pte_unmap(src_pte); |
| + pte_unmap(dst_pte); |
| src_pte = dst_pte = NULL; |
| put_swap_device(si); |
| si = NULL; |
| _ |