| From 591cc4d57abee76196a0304ef7e869622251fe29 Mon Sep 17 00:00:00 2001 |
| From: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| Date: Fri, 3 Jul 2009 08:44:37 -0500 |
| Subject: [PATCH] mm: page_alloc reduce lock sections further |
| |
| commit 46167aec68f48cbbeff23cae9173bc4d19a7bcda in tip. |
| |
Split the pages which are to be freed out onto a private list while
still holding the per-cpu page allocator locks, then drop those locks
and hand the list to free_pages_bulk(). The expensive part of the
free - taking zone->lock and merging pages back into the buddy
lists - now runs outside the pcp locks, shrinking the locked
sections further.

Since zone->lock is no longer taken with interrupts already disabled
by the caller, free_pcppages_bulk() and free_one_page() switch to
spin_lock_irqsave()/spin_unlock_irqrestore(). On PREEMPT_RT the bulk
free also calls cond_resched_lock() so that draining a large batch
cannot monopolize zone->lock.
| |
| Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
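---

The structural change is a generic locking pattern: while holding the
lock, detach the elements that need work onto a private on-stack list,
drop the lock, then do the expensive part with no locks held. The new
list_last_entry() helper added to list.h is simply the tail-end
counterpart of list_first_entry(); isolate_pcp_pages() uses it to peel
pages off the end of a pcp list.

Below is a stand-alone user-space C sketch of the same isolate-then-free
pattern. It is an illustration only: pcp_lock, pcp_list, isolate_nodes()
and free_nodes() are made-up names for this sketch, not kernel symbols.

	/*
	 * Detach work onto a private list under the lock, then do the
	 * expensive part (here: free + printf) with no locks held.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		struct node *next;
		int val;
	};

	static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pcp_list;	/* protected by pcp_lock */

	/* Detach up to @count nodes under the lock; return them as a chain. */
	static struct node *isolate_nodes(int count)
	{
		struct node *head = NULL;

		pthread_mutex_lock(&pcp_lock);
		while (count-- && pcp_list) {
			struct node *n = pcp_list;

			pcp_list = n->next;
			n->next = head;
			head = n;
		}
		pthread_mutex_unlock(&pcp_lock);
		return head;
	}

	/* The expensive part; runs with pcp_lock *not* held. */
	static void free_nodes(struct node *head)
	{
		while (head) {
			struct node *next = head->next;

			printf("freeing %d\n", head->val);
			free(head);
			head = next;
		}
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++) {
			struct node *n = malloc(sizeof(*n));

			if (!n)
				abort();
			n->val = i;
			pthread_mutex_lock(&pcp_lock);
			n->next = pcp_list;
			pcp_list = n;
			pthread_mutex_unlock(&pcp_lock);
		}
		free_nodes(isolate_nodes(4));	/* drains a batch of 4 */
		free_nodes(isolate_nodes(8));	/* only 4 nodes remain */
		return 0;
	}

Builds with "gcc -std=c99 -pthread". The property the sketch shares
with the patch: once the nodes sit on the private list, no other
thread can reach them, so freeing them safely needs no lock.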
| |
| diff --git a/include/linux/list.h b/include/linux/list.h |
| index 8392884..703cd07 100644 |
| --- a/include/linux/list.h |
| +++ b/include/linux/list.h |
| @@ -359,6 +359,9 @@ static inline void list_splice_tail_init(struct list_head *list, |
 #define list_first_entry(ptr, type, member) \
 	list_entry((ptr)->next, type, member)
 
+#define list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
 /**
  * list_for_each - iterate over a list
  * @pos:	the &struct list_head to use as a loop cursor.
| diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
| index 0d15911..777ac72 100644 |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -601,8 +601,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, |
| { |
| int migratetype = 0; |
| int batch_free = 0; |
| + unsigned long flags; |
| |
| - spin_lock(&zone->lock); |
| + spin_lock_irqsave(&zone->lock, flags); |
| zone->all_unreclaimable = 0; |
| zone->pages_scanned = 0; |
| |
| @@ -632,21 +633,26 @@ static void free_pcppages_bulk(struct zone *zone, int count, |
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, page_private(page));
 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
+#ifdef CONFIG_PREEMPT_RT
+			cond_resched_lock(&zone->lock);
+#endif
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock(&zone->lock);
+	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static void free_one_page(struct zone *zone, struct page *page, int order,
 				int migratetype)
 {
-	spin_lock(&zone->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
-	spin_unlock(&zone->lock);
+	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static void __free_pages_ok(struct page *page, unsigned int order)
| @@ -1041,6 +1047,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, |
 	return i;
 }
 
+static void
+isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
+{
+	while (count--) {
+		struct page *page = list_last_entry(src, struct page, lru);
+		list_move(&page->lru, dst);
+	}
+}
+
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
| @@ -1052,6 +1068,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, |
| */ |
| void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) |
| { |
| + LIST_HEAD(free_list); |
| unsigned long flags; |
| int to_drain; |
| int this_cpu; |
| @@ -1061,9 +1078,10 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) |
 		to_drain = pcp->batch;
 	else
 		to_drain = pcp->count;
-	free_pcppages_bulk(zone, to_drain, pcp);
+	isolate_pcp_pages(to_drain, &pcp->list, &free_list);
 	pcp->count -= to_drain;
 	unlock_cpu_pcp(flags, this_cpu);
+	free_pages_bulk(zone, to_drain, &free_list, 0);
 }
 #endif
 
| @@ -1082,6 +1100,8 @@ static void drain_pages(unsigned int cpu) |
 	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
+		LIST_HEAD(free_list);
+		int count;
 
 		__lock_cpu_pcp(&flags, cpu);
 		pset = per_cpu_ptr(zone->pageset, cpu);
| @@ -1092,9 +1112,11 @@ static void drain_pages(unsigned int cpu) |
 			continue;
 		}
 		pcp = &pset->pcp;
-		free_pcppages_bulk(zone, pcp->count, pcp);
+		isolate_pcp_pages(pcp->count, &pcp->list, &free_list);
+		count = pcp->count;
 		pcp->count = 0;
 		unlock_cpu_pcp(flags, cpu);
+		free_pages_bulk(zone, count, &free_list, 0);
 	}
 }
 
| @@ -1201,7 +1223,7 @@ void free_hot_cold_page(struct page *page, int cold) |
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 	int migratetype;
-	int this_cpu, wasMlocked = __TestClearPageMlocked(page);
+	int count, this_cpu, wasMlocked = __TestClearPageMlocked(page);
 
 	trace_mm_page_free_direct(page, 0);
 	kmemcheck_free_shadow(page, 0);
| @@ -1247,8 +1269,14 @@ void free_hot_cold_page(struct page *page, int cold) |
 	list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		free_pcppages_bulk(zone, pcp->batch, pcp);
+		LIST_HEAD(free_list);
+
+		isolate_pcp_pages(pcp->batch, &pcp->list, &free_list);
 		pcp->count -= pcp->batch;
+		count = pcp->batch;
+		put_zone_pcp(zone, flags, this_cpu);
+		free_pages_bulk(zone, count, &free_list, 0);
+		return;
 	}
 
 out:
| -- |
| 1.7.1.1 |
| |