From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: mm: add a helper to accept page
Date: Fri, 9 Aug 2024 14:48:52 +0300

Accept a given struct page and add it to the free list.

The helper is useful for physical memory scanners that want to use free
unaccepted memory.

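A hypothetical caller (for illustration only, not part of this patch)
could look like this, where "page" is a page a scanner has found by
physical address:

	/* Accept the page before using it as free memory. */
	if (PageUnaccepted(page))
		accept_page(page);

The unlocked PageUnaccepted() check is only an optimization:
accept_page() takes zone->lock, re-checks the flag and returns early
if the page has already been accepted by someone else.
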
Link: https://lkml.kernel.org/r/20240809114854.3745464-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/internal.h   |    8 ++++++++
 mm/page_alloc.c |   47 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 43 insertions(+), 12 deletions(-)

--- a/mm/internal.h~mm-add-a-helper-to-accept-page
+++ a/mm/internal.h
@@ -1432,4 +1432,12 @@ unsigned long move_page_tables(struct vm
                 unsigned long new_addr, unsigned long len,
                 bool need_rmap_locks, bool for_stack);

+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
+
 #endif /* __MM_INTERNAL_H */
--- a/mm/page_alloc.c~mm-add-a-helper-to-accept-page
+++ a/mm/page_alloc.c
@@ -6935,27 +6935,18 @@ static bool page_contains_unaccepted(str
         return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
 }

-static bool try_to_accept_memory_one(struct zone *zone)
+static void __accept_page(struct zone *zone, unsigned long *flags,
+                          struct page *page)
 {
-        unsigned long flags;
-        struct page *page;
         bool last;

-        spin_lock_irqsave(&zone->lock, flags);
-        page = list_first_entry_or_null(&zone->unaccepted_pages,
-                                        struct page, lru);
-        if (!page) {
-                spin_unlock_irqrestore(&zone->lock, flags);
-                return false;
-        }
-
         list_del(&page->lru);
         last = list_empty(&zone->unaccepted_pages);

         account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
         __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
         __ClearPageUnaccepted(page);
-        spin_unlock_irqrestore(&zone->lock, flags);
+        spin_unlock_irqrestore(&zone->lock, *flags);

         accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);

@@ -6963,6 +6954,38 @@ static bool try_to_accept_memory_one(str

         if (last)
                 static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+        struct zone *zone = page_zone(page);
+        unsigned long flags;
+
+        spin_lock_irqsave(&zone->lock, flags);
+        if (!PageUnaccepted(page)) {
+                spin_unlock_irqrestore(&zone->lock, flags);
+                return;
+        }
+
+        /* Unlocks zone->lock */
+        __accept_page(zone, &flags, page);
+}
+
+static bool try_to_accept_memory_one(struct zone *zone)
+{
+        unsigned long flags;
+        struct page *page;
+
+        spin_lock_irqsave(&zone->lock, flags);
+        page = list_first_entry_or_null(&zone->unaccepted_pages,
+                                        struct page, lru);
+        if (!page) {
+                spin_unlock_irqrestore(&zone->lock, flags);
+                return false;
+        }
+
+        /* Unlocks zone->lock */
+        __accept_page(zone, &flags, page);

         return true;
 }
_