| From: Kefeng Wang <wangkefeng.wang@huawei.com> |
| Subject: mm: remove page_mkclean() |
| Date: Tue, 4 Jun 2024 19:48:22 +0800 |
| |
| There are no remaining users of page_mkclean(); remove it and update |
| the documentation and comments that still refer to it. |
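| |
| For reference, any remaining caller can be converted directly, since |
| the removed helper was only a trivial wrapper around folio_mkclean() |
| (illustrative sketch, not part of this patch): |
| |
| 	/* before */ |
| 	ret = page_mkclean(page); |
| 	/* after */ |
| 	ret = folio_mkclean(page_folio(page)); |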
| |
| Link: https://lkml.kernel.org/r/20240604114822.2089819-5-wangkefeng.wang@huawei.com |
| Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> |
| Acked-by: David Hildenbrand <david@redhat.com> |
| Cc: Daniel Vetter <daniel@ffwll.ch> |
| Cc: Helge Deller <deller@gmx.de> |
| Cc: Jonathan Corbet <corbet@lwn.net> |
| Cc: Matthew Wilcox (Oracle) <willy@infradead.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| Documentation/core-api/pin_user_pages.rst | 8 ++++---- |
| drivers/video/fbdev/core/fb_defio.c | 4 ++-- |
| include/linux/mm.h | 2 +- |
| include/linux/rmap.h | 4 ---- |
| mm/gup.c | 2 +- |
| mm/mremap.c | 2 +- |
| 6 files changed, 9 insertions(+), 13 deletions(-) |
| |
| --- a/Documentation/core-api/pin_user_pages.rst~mm-remove-page_mkclean |
| +++ a/Documentation/core-api/pin_user_pages.rst |
| @@ -132,7 +132,7 @@ CASE 1: Direct IO (DIO) |
| ----------------------- |
| There are GUP references to pages that are serving |
| as DIO buffers. These buffers are needed for a relatively short time (so they |
| -are not "long term"). No special synchronization with page_mkclean() or |
| +are not "long term"). No special synchronization with folio_mkclean() or |
| munmap() is provided. Therefore, flags to set at the call site are: :: |
| |
| FOLL_PIN |
| @@ -144,7 +144,7 @@ CASE 2: RDMA |
| ------------ |
| There are GUP references to pages that are serving as DMA |
| buffers. These buffers are needed for a long time ("long term"). No special |
| -synchronization with page_mkclean() or munmap() is provided. Therefore, flags |
| +synchronization with folio_mkclean() or munmap() is provided. Therefore, flags |
| to set at the call site are: :: |
| |
| FOLL_PIN | FOLL_LONGTERM |
| @@ -170,7 +170,7 @@ callback, simply remove the range from t |
| |
| Either way, as long as the driver unpins the pages upon mmu notifier callback, |
| then there is proper synchronization with both filesystem and mm |
| -(page_mkclean(), munmap(), etc). Therefore, neither flag needs to be set. |
| +(folio_mkclean(), munmap(), etc). Therefore, neither flag needs to be set. |
| |
| CASE 4: Pinning for struct page manipulation only |
| ------------------------------------------------- |
| @@ -200,7 +200,7 @@ folio_maybe_dma_pinned(): the whole poin |
| ==================================================== |
| |
| The whole point of marking folios as "DMA-pinned" or "gup-pinned" is to be able |
| -to query, "is this folio DMA-pinned?" That allows code such as page_mkclean() |
| +to query, "is this folio DMA-pinned?" That allows code such as folio_mkclean() |
| (and file system writeback code in general) to make informed decisions about |
| what to do when a folio cannot be unmapped due to such pins. |
| |
| --- a/drivers/video/fbdev/core/fb_defio.c~mm-remove-page_mkclean |
| +++ a/drivers/video/fbdev/core/fb_defio.c |
| @@ -113,7 +113,7 @@ static vm_fault_t fb_deferred_io_fault(s |
| printk(KERN_ERR "no mapping available\n"); |
| |
| BUG_ON(!page->mapping); |
| - page->index = vmf->pgoff; /* for page_mkclean() */ |
| + page->index = vmf->pgoff; /* for folio_mkclean() */ |
| |
| vmf->page = page; |
| return 0; |
| @@ -161,7 +161,7 @@ static vm_fault_t fb_deferred_io_track_p |
| |
| /* |
| * We want the page to remain locked from ->page_mkwrite until |
| - * the PTE is marked dirty to avoid page_mkclean() being called |
| + * the PTE is marked dirty to avoid folio_mkclean() being called |
| * before the PTE is updated, which would leave the page ignored |
| * by defio. |
| * Do this by locking the page here and informing the caller |
| --- a/include/linux/mm.h~mm-remove-page_mkclean |
| +++ a/include/linux/mm.h |
| @@ -1577,7 +1577,7 @@ static inline void put_page(struct page |
| * issue. |
| * |
| * Locking: the lockless algorithm described in folio_try_get_rcu() |
| - * provides safe operation for get_user_pages(), page_mkclean() and |
| + * provides safe operation for get_user_pages(), folio_mkclean() and |
| * other calls that race to set up page table entries. |
| */ |
| #define GUP_PIN_COUNTING_BIAS (1U << 10) |
| --- a/include/linux/rmap.h~mm-remove-page_mkclean |
| +++ a/include/linux/rmap.h |
| @@ -802,8 +802,4 @@ static inline int folio_mkclean(struct f |
| } |
| #endif /* CONFIG_MMU */ |
| |
| -static inline int page_mkclean(struct page *page) |
| -{ |
| - return folio_mkclean(page_folio(page)); |
| -} |
| #endif /* _LINUX_RMAP_H */ |
| --- a/mm/gup.c~mm-remove-page_mkclean |
| +++ a/mm/gup.c |
| @@ -378,7 +378,7 @@ void unpin_user_pages_dirty_lock(struct |
| * 1) This code sees the page as already dirty, so it |
| * skips the call to set_page_dirty(). That could happen |
| * because clear_page_dirty_for_io() called |
| - * page_mkclean(), followed by set_page_dirty(). |
| + * folio_mkclean(), followed by set_page_dirty(). |
| * However, now the page is going to get written back, |
| * which meets the original intention of setting it |
| * dirty, so all is well: clear_page_dirty_for_io() goes |
| --- a/mm/mremap.c~mm-remove-page_mkclean |
| +++ a/mm/mremap.c |
| @@ -198,7 +198,7 @@ static int move_ptes(struct vm_area_stru |
| * PTE. |
| * |
| * NOTE! Both old and new PTL matter: the old one |
| - * for racing with page_mkclean(), the new one to |
| + * for racing with folio_mkclean(), the new one to |
| * make sure the physical page stays valid until |
| * the TLB entry for the old mapping has been |
| * flushed. |
| _ |