From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Subject: mm/mremap: put VMA check and prep logic into helper function
Date: Thu, 17 Jul 2025 17:55:53 +0100

Rather than lumping everything together in do_mremap(), add a new helper
function, check_prep_vma(), to do the work relating to each VMA.

This further lays groundwork for subsequent patches which will allow for
batched VMA mremap().

Additionally, setting vrm->new_addr == vrm->addr when prepping the VMA
avoids the need to do so separately in the expand VMA mlocked case.

No functional change intended.

Link: https://lkml.kernel.org/r/15efa3c57935f7f8894094b94c1803c2f322c511.1752770784.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/mremap.c | 58 ++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 30 deletions(-)

--- a/mm/mremap.c~mm-mremap-put-vma-check-and-prep-logic-into-helper-function
+++ a/mm/mremap.c
@@ -1634,7 +1634,6 @@ static bool align_hugetlb(struct vma_rem
 static unsigned long expand_vma(struct vma_remap_struct *vrm)
 {
 	unsigned long err;
-	unsigned long addr = vrm->addr;
 
 	err = remap_is_valid(vrm);
 	if (err)
@@ -1649,16 +1648,8 @@ static unsigned long expand_vma(struct v
 	if (err)
 		return err;
 
-	/*
-	 * We want to populate the newly expanded portion of the VMA to
-	 * satisfy the expectation that mlock()'ing a VMA maintains all
-	 * of its pages in memory.
-	 */
-	if (vrm->mlocked)
-		vrm->new_addr = addr;
-
 	/* OK we're done! */
-	return addr;
+	return vrm->addr;
 }
 
 /*
@@ -1714,10 +1705,33 @@ static unsigned long mremap_at(struct vm
 	return -EINVAL;
 }
 
+static int check_prep_vma(struct vma_remap_struct *vrm)
+{
+	struct vm_area_struct *vma = vrm->vma;
+
+	if (!vma)
+		return -EFAULT;
+
+	/* If mseal()'d, mremap() is prohibited. */
+	if (!can_modify_vma(vma))
+		return -EPERM;
+
+	/* Align to hugetlb page size, if required. */
+	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
+		return -EINVAL;
+
+	vrm_set_delta(vrm);
+	vrm->remap_type = vrm_remap_type(vrm);
+	/* For convenience, we set new_addr even if VMA won't move. */
+	if (!vrm_implies_new_addr(vrm))
+		vrm->new_addr = vrm->addr;
+
+	return 0;
+}
+
 static unsigned long do_mremap(struct vma_remap_struct *vrm)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
 	unsigned long res;
 
 	vrm->old_len = PAGE_ALIGN(vrm->old_len);
@@ -1731,26 +1745,10 @@ static unsigned long do_mremap(struct vm
 		return -EINTR;
 	vrm->mmap_locked = true;
 
-	vma = vrm->vma = vma_lookup(mm, vrm->addr);
-	if (!vma) {
-		res = -EFAULT;
-		goto out;
-	}
-
-	/* If mseal()'d, mremap() is prohibited. */
-	if (!can_modify_vma(vma)) {
-		res = -EPERM;
-		goto out;
-	}
-
-	/* Align to hugetlb page size, if required. */
-	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
-		res = -EINVAL;
+	vrm->vma = vma_lookup(current->mm, vrm->addr);
+	res = check_prep_vma(vrm);
+	if (res)
 		goto out;
-	}
-
-	vrm_set_delta(vrm);
-	vrm->remap_type = vrm_remap_type(vrm);
 
 	/* Actually execute mremap. */
 	res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
_