From 890b728c18986e6b49278600607942d4fb94d693 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Fri, 3 Jul 2009 08:44:37 -0500
Subject: [PATCH 094/262] mm: page_alloc: reduce lock sections further

Split out the pages which are to be freed into a separate list and
call free_pcppages_bulk() outside of the percpu page allocator locks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
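
The core of the change is a two-phase free: isolate_pcp_pages() only
unhooks pages from the pcp lists inside the short critical section, and
free_pcppages_bulk() hands them to the buddy allocator afterwards, under
zone->lock alone. Below is a minimal user-space sketch of that pattern,
with pthread mutexes standing in for pa_lock and zone->lock; the names
isolate_pages() and free_bulk() are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct page {
        struct page *next;
        int id;
    };

    struct pcp {
        pthread_mutex_t lock;   /* stands in for the per-CPU pa_lock */
        struct page *head;
        int count;
    };

    static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* zone->lock */

    /* Phase 1: under the pcp lock, only unhook pages onto a private list. */
    static int isolate_pages(struct pcp *pcp, int to_free, struct page **dst)
    {
        int n = 0;

        pthread_mutex_lock(&pcp->lock);
        while (n < to_free && pcp->head) {
            struct page *page = pcp->head;

            pcp->head = page->next;
            page->next = *dst;
            *dst = page;
            n++;
        }
        pcp->count -= n;
        pthread_mutex_unlock(&pcp->lock);
        return n;
    }

    /* Phase 2: the expensive work runs under zone_lock only; the pcp
     * lock is no longer held, so other contexts can use the pcp lists. */
    static void free_bulk(struct page *list)
    {
        pthread_mutex_lock(&zone_lock);
        while (list) {
            struct page *page = list;

            list = list->next;
            printf("freeing page %d\n", page->id);
            free(page);
        }
        pthread_mutex_unlock(&zone_lock);
    }

    int main(void)
    {
        struct pcp pcp;
        struct page *dst = NULL;
        int i, n;

        pthread_mutex_init(&pcp.lock, NULL);
        pcp.head = NULL;
        pcp.count = 0;

        for (i = 0; i < 8; i++) {
            struct page *page = malloc(sizeof(*page));

            page->id = i;
            page->next = pcp.head;
            pcp.head = page;
            pcp.count++;
        }

        n = isolate_pages(&pcp, 4, &dst);  /* short critical section */
        free_bulk(dst);                    /* heavy part, pcp lock dropped */
        printf("freed %d, %d left on the pcp list\n", n, pcp.count);
        return 0;
    }

The kernel version additionally carries a count alongside the list, so
NR_FREE_PAGES can be updated once per batch via __mod_zone_page_state()
rather than once per page.
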
mm/page_alloc.c | 77 +++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 58 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b0f5314..9f0c53e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -632,7 +632,7 @@ static inline int free_pages_check(struct page *page)
}
/*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -643,16 +643,42 @@ static inline int free_pages_check(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
- struct per_cpu_pages *pcp)
+ struct list_head *list)
{
- int migratetype = 0;
- int batch_free = 0;
int to_free = count;
+ unsigned long flags;
- spin_lock(&zone->lock);
+ spin_lock_irqsave(&zone->lock, flags);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
+ while (!list_empty(list)) {
+ struct page *page = list_first_entry(list, struct page, lru);
+
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, page_private(page));
+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
+ to_free--;
+ }
+ WARN_ON(to_free != 0);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Moves a number of pages from the PCP lists to a separate list,
+ * which is then freed outside of the locked region.
+ *
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ */
+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
+ struct list_head *dst)
+{
+ int migratetype = 0, batch_free = 0;
+
while (to_free) {
struct page *page;
struct list_head *list;
@@ -668,7 +694,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
- list = &pcp->lists[migratetype];
+ list = &src->lists[migratetype];
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
@@ -677,27 +703,24 @@ static void free_pcppages_bulk(struct zone *zone, int count,
do {
page = list_last_entry(list, struct page, lru);
- /* must delete as __free_one_page list manipulates */
list_del(&page->lru);
- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, zone, 0, page_private(page));
- trace_mm_page_pcpu_drain(page, 0, page_private(page));
+ list_add(&page->lru, dst);
} while (--to_free && --batch_free && !list_empty(list));
}
- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
- spin_unlock(&zone->lock);
}
static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
- spin_lock(&zone->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
- spin_unlock(&zone->lock);
+ spin_unlock_irqrestore(&zone->lock, flags);
}
static bool free_pages_prepare(struct page *page, unsigned int order)
@@ -1124,6 +1147,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
+ LIST_HEAD(dst);
int to_drain;
local_lock_irqsave(pa_lock, flags);
@@ -1131,9 +1155,10 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- free_pcppages_bulk(zone, to_drain, pcp);
+ isolate_pcp_pages(to_drain, pcp, &dst);
pcp->count -= to_drain;
local_unlock_irqrestore(pa_lock, flags);
+ free_pcppages_bulk(zone, to_drain, &dst);
}
#endif
@@ -1152,16 +1177,21 @@ static void drain_pages(unsigned int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
+ LIST_HEAD(dst);
+ int count;
cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- if (pcp->count) {
- free_pcppages_bulk(zone, pcp->count, pcp);
+ count = pcp->count;
+ if (count) {
+ isolate_pcp_pages(count, pcp, &dst);
pcp->count = 0;
}
cpu_unlock_irqrestore(cpu, flags);
+ if (count)
+ free_pcppages_bulk(zone, count, &dst);
}
}
@@ -1302,8 +1332,15 @@ void free_hot_cold_page(struct page *page, int cold)
list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- free_pcppages_bulk(zone, pcp->batch, pcp);
+ LIST_HEAD(dst);
+ int count;
+
+ isolate_pcp_pages(pcp->batch, pcp, &dst);
pcp->count -= pcp->batch;
+ count = pcp->batch;
+ local_unlock_irqrestore(pa_lock, flags);
+ free_pcppages_bulk(zone, count, &dst);
+ return;
}
out:
@@ -3874,12 +3911,14 @@ static int __zone_pcp_update(void *data)
for_each_possible_cpu(cpu) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
+ LIST_HEAD(dst);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
cpu_lock_irqsave(cpu, flags);
- free_pcppages_bulk(zone, pcp->count, pcp);
+ isolate_pcp_pages(pcp->count, pcp, &dst);
+ free_pcppages_bulk(zone, pcp->count, &dst);
setup_pageset(pset, batch);
cpu_unlock_irqrestore(cpu, flags);
}
--
1.7.10.4
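
The spin_lock() -> spin_lock_irqsave() conversions on zone->lock follow
from the same split: free_pcppages_bulk() used to run with interrupts
already disabled under local_lock_irqsave(pa_lock, flags), so a plain
spin_lock() sufficed, but it is now called after local_unlock_irqrestore()
with interrupts possibly enabled, so zone->lock has to disable interrupts
itself; free_one_page() gets the matching treatment. The
WARN_ON(to_free != 0) is a cheap consistency check that the count the
caller accounts via __mod_zone_page_state() matches the number of pages
actually found on the isolated list.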