| From: Michael Ellerman <mpe@ellerman.id.au> |
| Subject: mm: remove arch_unmap() |
| Date: Mon, 12 Aug 2024 18:26:04 +1000 |
| |
| Now that powerpc no longer uses arch_unmap() to handle VDSO unmapping, |
| there are no meaningful implementations left. Drop support for it entirely, |
| and update comments which refer to it. |
| |
| Link: https://lkml.kernel.org/r/20240812082605.743814-3-mpe@ellerman.id.au |
| Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> |
| Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Acked-by: David Hildenbrand <david@redhat.com> |
| Reviewed-by: Thomas Gleixner <tglx@linutronix.de> |
| Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com> |
| Cc: Christophe Leroy <christophe.leroy@csgroup.eu> |
| Cc: Jeff Xu <jeffxu@google.com> |
| Cc: Nicholas Piggin <npiggin@gmail.com> |
| Cc: Pedro Falcato <pedro.falcato@gmail.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| arch/powerpc/include/asm/mmu_context.h | 5 ----- |
| arch/x86/include/asm/mmu_context.h | 5 ----- |
| include/asm-generic/mm_hooks.h | 11 +++-------- |
| mm/mmap.c | 4 +--- |
| mm/vma.c | 8 ++------ |
| 5 files changed, 6 insertions(+), 27 deletions(-) |
| |
| --- a/arch/powerpc/include/asm/mmu_context.h~mm-remove-arch_unmap |
| +++ a/arch/powerpc/include/asm/mmu_context.h |
| @@ -260,11 +260,6 @@ static inline void enter_lazy_tlb(struct |
| |
| extern void arch_exit_mmap(struct mm_struct *mm); |
| |
| -static inline void arch_unmap(struct mm_struct *mm, |
| - unsigned long start, unsigned long end) |
| -{ |
| -} |
| - |
| #ifdef CONFIG_PPC_MEM_KEYS |
| bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, |
| bool execute, bool foreign); |
| --- a/arch/x86/include/asm/mmu_context.h~mm-remove-arch_unmap |
| +++ a/arch/x86/include/asm/mmu_context.h |
| @@ -232,11 +232,6 @@ static inline bool is_64bit_mm(struct mm |
| } |
| #endif |
| |
| -static inline void arch_unmap(struct mm_struct *mm, unsigned long start, |
| - unsigned long end) |
| -{ |
| -} |
| - |
| /* |
| * We only want to enforce protection keys on the current process |
| * because we effectively have no access to PKRU for other |
| --- a/include/asm-generic/mm_hooks.h~mm-remove-arch_unmap |
| +++ a/include/asm-generic/mm_hooks.h |
| @@ -1,8 +1,8 @@ |
| /* SPDX-License-Identifier: GPL-2.0 */ |
| /* |
| - * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap |
| - * and arch_unmap to be included in asm-FOO/mmu_context.h for any |
| - * arch FOO which doesn't need to hook these. |
| + * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap |
| + * to be included in asm-FOO/mmu_context.h for any arch FOO which |
| + * doesn't need to hook these. |
| */ |
| #ifndef _ASM_GENERIC_MM_HOOKS_H |
| #define _ASM_GENERIC_MM_HOOKS_H |
| @@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct |
| { |
| } |
| |
| -static inline void arch_unmap(struct mm_struct *mm, |
| - unsigned long start, unsigned long end) |
| -{ |
| -} |
| - |
| static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, |
| bool write, bool execute, bool foreign) |
| { |
| --- a/mm/mmap.c~mm-remove-arch_unmap |
| +++ a/mm/mmap.c |
| @@ -1743,14 +1743,12 @@ int do_vma_munmap(struct vma_iterator *v |
| struct mm_struct *mm = vma->vm_mm; |
| |
| /* |
| - * Check if memory is sealed before arch_unmap. |
| - * Prevent unmapping a sealed VMA. |
| + * Check if memory is sealed, prevent unmapping a sealed VMA. |
| * can_modify_mm assumes we have acquired the lock on MM. |
| */ |
| if (unlikely(!can_modify_mm(mm, start, end))) |
| return -EPERM; |
| |
| - arch_unmap(mm, start, end); |
| return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); |
| } |
| |
| --- a/mm/vma.c~mm-remove-arch_unmap |
| +++ a/mm/vma.c |
| @@ -841,7 +841,7 @@ map_count_exceeded: |
| * |
| * This function takes a @mas that is either pointing to the previous VMA or set |
| * to MA_START and sets it up to remove the mapping(s). The @len will be |
| - * aligned and any arch_unmap work will be preformed. |
| + * aligned. |
| * |
| * Return: 0 on success and drops the lock if so directed, error and leaves the |
| * lock held otherwise. |
| @@ -861,16 +861,12 @@ int do_vmi_munmap(struct vma_iterator *v |
| return -EINVAL; |
| |
| /* |
| - * Check if memory is sealed before arch_unmap. |
| - * Prevent unmapping a sealed VMA. |
| + * Check if memory is sealed, prevent unmapping a sealed VMA. |
| * can_modify_mm assumes we have acquired the lock on MM. |
| */ |
| if (unlikely(!can_modify_mm(mm, start, end))) |
| return -EPERM; |
| |
| - /* arch_unmap() might do unmaps itself. */ |
| - arch_unmap(mm, start, end); |
| - |
| /* Find the first overlapping VMA */ |
| vma = vma_find(vmi, end); |
| if (!vma) { |
| _ |