| From: Alexei Starovoitov <ast@kernel.org> |
| Subject: mm/page_alloc: avoid second trylock of zone->lock |
| Date: Sun, 30 Mar 2025 17:28:09 -0700 |
| |
| spin_trylock followed by spin_lock causes an extra write cache access. |
| If the lock is contended, this may cause unnecessary cache line bouncing, |
| and it also executes a redundant irq restore/save pair. Therefore, check |
| alloc/fpi_flags first and call either spin_trylock or spin_lock. |
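| |
| For illustration only (not part of the patch): a minimal userspace |
| sketch of the before/after pattern, using pthread spinlocks in place |
| of the kernel's spin_lock_irqsave()/spin_trylock_irqsave(). The |
| TRYLOCK_FLAG bit and the function names are placeholders, not kernel |
| identifiers. |
| |
| 	#include <pthread.h> |
| 	#include <stdbool.h> |
| |
| 	/* stand-in for ALLOC_TRYLOCK / FPI_TRYLOCK */ |
| 	#define TRYLOCK_FLAG 0x1 |
| |
| 	static pthread_spinlock_t lock; |
| |
| 	/* Before: trylock first, fall back to a plain lock on failure. |
| 	 * Under contention the failed trylock is an extra write access |
| 	 * to the lock's cache line, and in the kernel the non-trylock |
| 	 * path also runs a redundant irq restore/save pair. */ |
| 	static bool lock_before(unsigned int flags) |
| 	{ |
| 		if (pthread_spin_trylock(&lock) != 0) { |
| 			if (flags & TRYLOCK_FLAG) |
| 				return false;	/* opportunistic caller gives up */ |
| 			pthread_spin_lock(&lock); |
| 		} |
| 		return true; |
| 	} |
| |
| 	/* After: branch on the flag first, so every caller touches the |
| 	 * lock exactly once. */ |
| 	static bool lock_after(unsigned int flags) |
| 	{ |
| 		if (flags & TRYLOCK_FLAG) { |
| 			if (pthread_spin_trylock(&lock) != 0) |
| 				return false; |
| 		} else { |
| 			pthread_spin_lock(&lock); |
| 		} |
| 		return true; |
| 	} |
| |
| 	int main(void) |
| 	{ |
| 		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE); |
| 		if (lock_before(TRYLOCK_FLAG)) |
| 			pthread_spin_unlock(&lock); |
| 		if (lock_after(0)) |
| 			pthread_spin_unlock(&lock); |
| 		pthread_spin_destroy(&lock); |
| 		return 0; |
| 	} |
| |
| The "after" shape is what the three hunks below apply to |
| free_one_page(), rmqueue_bulk() and rmqueue_buddy(). |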
| |
| Link: https://lkml.kernel.org/r/20250331002809.94758-1-alexei.starovoitov@gmail.com |
| Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation") |
| Signed-off-by: Alexei Starovoitov <ast@kernel.org> |
| Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Acked-by: Michal Hocko <mhocko@suse.com> |
| Acked-by: Vlastimil Babka <vbabka@suse.cz> |
| Reviewed-by: Harry Yoo <harry.yoo@oracle.com> |
| Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev> |
| Cc: Andrii Nakryiko <andrii@kernel.org> |
| Cc: Daniel Borkmann <daniel@iogearbox.net> |
| Cc: Martin KaFai Lau <martin.lau@kernel.org> |
| Cc: Michal Hocko <mhocko@suse.com> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Cc: Steven Rostedt <rostedt@goodmis.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| mm/page_alloc.c | 15 +++++++++------ |
| 1 file changed, 9 insertions(+), 6 deletions(-) |
| |
| --- a/mm/page_alloc.c~mm-page_alloc-avoid-second-trylock-of-zone-lock |
| +++ a/mm/page_alloc.c |
| @@ -1400,11 +1400,12 @@ static void free_one_page(struct zone *z |
|  	struct llist_head *llhead; |
|  	unsigned long flags; |
|  |
| -	if (!spin_trylock_irqsave(&zone->lock, flags)) { |
| -		if (unlikely(fpi_flags & FPI_TRYLOCK)) { |
| +	if (unlikely(fpi_flags & FPI_TRYLOCK)) { |
| +		if (!spin_trylock_irqsave(&zone->lock, flags)) { |
|  			add_page_to_zone_llist(zone, page, order); |
|  			return; |
|  		} |
| +	} else { |
|  		spin_lock_irqsave(&zone->lock, flags); |
|  	} |
| |
| @@ -2314,9 +2315,10 @@ static int rmqueue_bulk(struct zone *zon |
|  	unsigned long flags; |
|  	int i; |
|  |
| -	if (!spin_trylock_irqsave(&zone->lock, flags)) { |
| -		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) |
| +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { |
| +		if (!spin_trylock_irqsave(&zone->lock, flags)) |
|  			return 0; |
| +	} else { |
|  		spin_lock_irqsave(&zone->lock, flags); |
|  	} |
|  	for (i = 0; i < count; ++i) { |
| @@ -2937,9 +2939,10 @@ struct page *rmqueue_buddy(struct zone * |
| |
|  	do { |
|  		page = NULL; |
| -		if (!spin_trylock_irqsave(&zone->lock, flags)) { |
| -			if (unlikely(alloc_flags & ALLOC_TRYLOCK)) |
| +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) { |
| +			if (!spin_trylock_irqsave(&zone->lock, flags)) |
|  				return NULL; |
| +		} else { |
|  			spin_lock_irqsave(&zone->lock, flags); |
|  		} |
|  		if (alloc_flags & ALLOC_HIGHATOMIC) |
| _ |