From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: um: implement the new page table range API
Date: Wed, 2 Aug 2023 16:13:55 +0100

Add PFN_PTE_SHIFT and update_mmu_cache_range().

Link: https://lkml.kernel.org/r/20230802151406.3735276-28-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 arch/um/include/asm/pgtable.h |    7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

--- a/arch/um/include/asm/pgtable.h~um-implement-the-new-page-table-range-api
+++ a/arch/um/include/asm/pgtable.h
@@ -242,11 +242,7 @@ static inline void set_pte(pte_t *pteptr
 	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *pteptr, pte_t pteval)
-{
-	set_pte(pteptr, pteval);
-}
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 
 #define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
@@ -290,6 +286,7 @@ struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 
 #define update_mmu_cache(vma,address,ptep) do {} while (0)
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)
 
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
_