From: Josef Bacik <josef@toxicpanda.com>
Subject: mm: cleanup flags usage in faultin_page
Date: Thu, 18 Jul 2024 17:26:06 -0400

Patch series "mm: some small page fault cleanups".

I was recently wreaking havoc in the page fault code, and I noticed some
things that could be cleaned up.  We no longer modify the gup flags in
faultin_page, so we can clean up how we pass the flags in and remove the
extra variable in __get_user_pages.


This patch (of 2):

We pass a pointer to the foll_flags into faultin_page(), but we never
modify the flags in this call.  Change it to take the flags value
directly instead.
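
The pattern in isolation (an illustrative sketch with hypothetical names,
not the kernel code): when a callee only ever reads a scalar, taking it
by value documents in the signature that the caller's copy cannot change,
and it drops a dereference on every use.

	/* Hypothetical flag for the sketch, not a real FOLL_* value. */
	#define DEMO_FLAG_WRITE 0x1

	/* Before: by-pointer, even though *flags is only ever read. */
	static int demo_want_write_ptr(unsigned int *flags)
	{
		return !!(*flags & DEMO_FLAG_WRITE);
	}

	/* After: by-value, the caller's flags provably cannot change. */
	static int demo_want_write_val(unsigned int flags)
	{
		return !!(flags & DEMO_FLAG_WRITE);
	}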

Link: https://lkml.kernel.org/r/2df51a54c06bdf93e1cb09a19a9ef1df6557b59e.1721337845.git.josef@toxicpanda.com
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/gup.c |   20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/mm/gup.c~mm-cleanup-flags-usage-in-faultin_page
+++ a/mm/gup.c
@@ -1153,19 +1153,19 @@ unmap:
  * to 0 and -EBUSY returned.
  */
 static int faultin_page(struct vm_area_struct *vma,
-		unsigned long address, unsigned int *flags, bool unshare,
+		unsigned long address, unsigned int flags, bool unshare,
 		int *locked)
 {
 	unsigned int fault_flags = 0;
 	vm_fault_t ret;
 
-	if (*flags & FOLL_NOFAULT)
+	if (flags & FOLL_NOFAULT)
 		return -EFAULT;
-	if (*flags & FOLL_WRITE)
+	if (flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
-	if (*flags & FOLL_REMOTE)
+	if (flags & FOLL_REMOTE)
 		fault_flags |= FAULT_FLAG_REMOTE;
-	if (*flags & FOLL_UNLOCKABLE) {
+	if (flags & FOLL_UNLOCKABLE) {
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 		/*
 		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
@@ -1173,12 +1173,12 @@ static int faultin_page(struct vm_area_s
 		 * That's because some callers may not be prepared to
 		 * handle early exits caused by non-fatal signals.
 		 */
-		if (*flags & FOLL_INTERRUPTIBLE)
+		if (flags & FOLL_INTERRUPTIBLE)
 			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
 	}
-	if (*flags & FOLL_NOWAIT)
+	if (flags & FOLL_NOWAIT)
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
-	if (*flags & FOLL_TRIED) {
+	if (flags & FOLL_TRIED) {
 		/*
 		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
 		 * can co-exist
@@ -1212,7 +1212,7 @@ static int faultin_page(struct vm_area_s
 	}
 
 	if (ret & VM_FAULT_ERROR) {
-		int err = vm_fault_to_errno(ret, *flags);
+		int err = vm_fault_to_errno(ret, flags);
 
 		if (err)
 			return err;
@@ -1490,7 +1490,7 @@ retry:
 
 		page = follow_page_mask(vma, start, foll_flags, &ctx);
 		if (!page || PTR_ERR(page) == -EMLINK) {
-			ret = faultin_page(vma, start, &foll_flags,
+			ret = faultin_page(vma, start, foll_flags,
 					   PTR_ERR(page) == -EMLINK, locked);
 			switch (ret) {
 			case 0:
_