| From: Peter Xu <peterx@redhat.com> |
| Subject: mm/x86: remove dead code for hugetlbpage.c |
| Date: Wed, 25 May 2022 15:52:20 -0400 |
| |
| This code has been there since the old days and has never been used. Remove it.
| |
| Link: https://lkml.kernel.org/r/20220525195220.10241-1-peterx@redhat.com |
| Signed-off-by: Peter Xu <peterx@redhat.com> |
| Acked-by: Muchun Song <songmuchun@bytedance.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| arch/x86/mm/hugetlbpage.c | 39 ------------------------------------ |
| 1 file changed, 39 deletions(-) |
| |
| --- a/arch/x86/mm/hugetlbpage.c~mm-x86-remove-dead-code-for-hugetlbpagec |
| +++ a/arch/x86/mm/hugetlbpage.c |
| @@ -19,44 +19,6 @@ |
| #include <asm/tlbflush.h> |
| #include <asm/elf.h> |
| |
| -#if 0 /* This is just for testing */ |
| -struct page * |
| -follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
| -{ |
| - unsigned long start = address; |
| - int length = 1; |
| - int nr; |
| - struct page *page; |
| - struct vm_area_struct *vma; |
| - |
| - vma = find_vma(mm, addr); |
| - if (!vma || !is_vm_hugetlb_page(vma)) |
| - return ERR_PTR(-EINVAL); |
| - |
| - pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); |
| - |
| - /* hugetlb should be locked, and hence, prefaulted */ |
| - WARN_ON(!pte || pte_none(*pte)); |
| - |
| - page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; |
| - |
| - WARN_ON(!PageHead(page)); |
| - |
| - return page; |
| -} |
| - |
| -int pmd_huge(pmd_t pmd) |
| -{ |
| - return 0; |
| -} |
| - |
| -int pud_huge(pud_t pud) |
| -{ |
| - return 0; |
| -} |
| - |
| -#else |
| - |
| /* |
| * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal |
| * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. |
| @@ -72,7 +34,6 @@ int pud_huge(pud_t pud) |
| { |
| return !!(pud_val(pud) & _PAGE_PSE); |
| } |
| -#endif |
| |
| #ifdef CONFIG_HUGETLB_PAGE |
| static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, |
| _ |
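|
| [Editor's illustration, not part of the patch: the hunk above leaves the
| real pud_huge() in place, which simply tests the x86 _PAGE_PSE bit
| (bit 7) in the table entry. The following is a minimal, self-contained
| user-space sketch of that bit test; fake_pud_t, fake_pud_huge() and the
| main() driver are assumptions made for illustration only, while the
| return expression mirrors the kernel line shown in the diff context.]
|
| #include <stdio.h>
| #include <stdint.h>
|
| #define _PAGE_PSE	(1ULL << 7)	/* x86 "page size extension" bit */
|
| /* Stand-in for the kernel's pud_t, for illustration only. */
| typedef struct { uint64_t val; } fake_pud_t;
|
| /* Mirrors the surviving code: return !!(pud_val(pud) & _PAGE_PSE); */
| static int fake_pud_huge(fake_pud_t pud)
| {
| 	return !!(pud.val & _PAGE_PSE);
| }
|
| int main(void)
| {
| 	fake_pud_t regular = { .val = 0x067 };			/* PSE clear */
| 	fake_pud_t huge    = { .val = 0x067 | _PAGE_PSE };	/* PSE set */
|
| 	printf("regular: %d, huge: %d\n",
| 	       fake_pud_huge(regular), fake_pud_huge(huge));
| 	return 0;
| }
|
| [Per the comment kept in the file, the real pmd_huge() must also report
| non-present (migration or hwpoisoned) hugetlb entries, so it is not a
| bare PSE test; the sketch above only models the pud_huge() case whose
| body is visible in the diff.]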