From 91bfcdc157db256c0ac752cb3d51d2bb6a54d1fb Mon Sep 17 00:00:00 2001
From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Sun, 23 Jan 2011 19:17:40 -0500
Subject: [PATCH] page_alloc: augment percpu pages support for newer kernels

This is a follow-on to:

  mm-page_alloc-rt-friendly-per-cpu-pages.patch
  (tip ff3fd6afd788760c846a2f4449487debb6c4b0ac)

and:

  mm-page_alloc-reduce-lock-sections-further.patch
  (tip 46167aec68f48cbbeff23cae9173bc4d19a7bcda)

which is embedded in the big merge up to 2.6.33-rt (5f854cfc024).

At some point it probably makes sense to munge all three of
these together -- whee.

See the constituent bits spread around in a git tip repo with:

  git diff 5f854cfc024622e4aae14d7cf422f6ff86278688^2 \
      5f854cfc024622e4aae14d7cf422f6ff86278688 mm/page_alloc.c

(the above merge changeset is a superset of this delta).

You can find the origin of this change in the tip merge commit:

  commit 5f854cfc024622e4aae14d7cf422f6ff86278688
  Merge: cc24da0 4ec62b2
  Author: Thomas Gleixner <tglx@linutronix.de>
  Date:   Sun Feb 21 20:17:22 2010 +0100

      Forward to 2.6.33-rc8

      Merge branch 'linus' into rt/head with a pile of conflicts.

      Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Normally a merge commit does not contain significant changes or
additions that did not come from some other "normal" commit.  In
this case it does, so break them out into separate explicit commits.
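
The delta below also fixes __lock_cpu_pcp() to write through its
flags pointer (*flags = 0) rather than assigning to the pointer
argument itself, and switches the drain paths from free_pages_bulk()
on a private LIST_HEAD over to free_pcppages_bulk() on a private
struct per_cpu_pages.  The reworked isolate_pcp_pages() fills that
container from the live pcp lists, so the callers all follow a
two-phase pattern, roughly (summarizing the diff below, not new
code):

	lock_cpu_pcp(&flags, &this_cpu);
	isolate_pcp_pages(to_drain, pcp, &dst);   /* detach under the pcp lock */
	pcp->count -= to_drain;
	unlock_cpu_pcp(flags, this_cpu);
	free_pcppages_bulk(zone, to_drain, &dst); /* free under zone->lock */

This way pages are detached from the per-cpu lists under the pcp
lock, but are only fed to __free_one_page() under zone->lock, where
a PREEMPT_RT kernel can cond_resched_lock() between pages.
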
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 777ac72..ab0695e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -195,7 +195,7 @@ static inline void __lock_cpu_pcp(unsigned long *flags, int cpu)
{
#ifdef CONFIG_PREEMPT_RT
spin_lock(&__get_cpu_lock(pcp_locks, cpu));
- flags = 0;
+ *flags = 0;
#else
local_irq_save(*flags);
#endif
@@ -597,10 +597,9 @@ static inline int free_pages_check(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
- struct per_cpu_pages *pcp)
+ struct per_cpu_pages *pcp)
{
int migratetype = 0;
- int batch_free = 0;
unsigned long flags;
spin_lock_irqsave(&zone->lock, flags);
@@ -608,6 +607,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
zone->pages_scanned = 0;
__mod_zone_page_state(zone, NR_FREE_PAGES, count);
+
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) {
+ struct list_head *list = &pcp->lists[migratetype];
+
+ while (!list_empty(list)) {
+ struct page *page;
+
+ page = list_first_entry(list, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, page_private(page));
+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
+#ifdef CONFIG_PREEMPT_RT
+ cond_resched_lock(&zone->lock);
+#endif
+ count--;
+ }
+ }
+ WARN_ON(count != 0);
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
+ struct per_cpu_pages *dst)
+{
+ int migratetype, batch_free = 0;
+
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+ INIT_LIST_HEAD(&dst->lists[migratetype]);
+ migratetype = 0;
+
while (count) {
struct page *page;
struct list_head *list;
@@ -623,22 +654,16 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
- list = &pcp->lists[migratetype];
+ list = &src->lists[migratetype];
} while (list_empty(list));
do {
- page = list_entry(list->prev, struct page, lru);
+ page = list_last_entry(list, struct page, lru);
/* must delete as __free_one_page list manipulates */
list_del(&page->lru);
- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, zone, 0, page_private(page));
- trace_mm_page_pcpu_drain(page, 0, page_private(page));
-#ifdef CONFIG_PREEMPT_RT
- cond_resched_lock(&zone->lock);
-#endif
+ list_add(&page->lru, &dst->lists[migratetype]);
} while (--count && --batch_free && !list_empty(list));
}
- spin_unlock_irqrestore(&zone->lock, flags);
}
static void free_one_page(struct zone *zone, struct page *page, int order,
@@ -1047,16 +1072,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
-static void
-isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
-{
- while (count--) {
- struct page *page = list_last_entry(src, struct page, lru);
- list_move(&page->lru, dst);
- }
-}
-
-
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -1068,20 +1083,19 @@ isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
- LIST_HEAD(free_list);
+ struct per_cpu_pages dst;
unsigned long flags;
- int to_drain;
- int this_cpu;
+ int to_drain, this_cpu;
lock_cpu_pcp(&flags, &this_cpu);
if (pcp->count >= pcp->batch)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- isolate_pcp_pages(to_drain, &pcp->list, &free_list);
+ isolate_pcp_pages(to_drain, pcp, &dst);
pcp->count -= to_drain;
unlock_cpu_pcp(flags, this_cpu);
- free_pages_bulk(zone, to_drain, &free_list, 0);
+ free_pcppages_bulk(zone, to_drain, &dst);
}
#endif
@@ -1099,8 +1113,7 @@ static void drain_pages(unsigned int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
- LIST_HEAD(free_list);
+ struct per_cpu_pages *pcp, dst;
int count;
__lock_cpu_pcp(&flags, cpu);
@@ -1111,12 +1124,13 @@ static void drain_pages(unsigned int cpu)
WARN_ON(1);
continue;
}
+
pcp = &pset->pcp;
- isolate_pcp_pages(pcp->count, &pcp->list, &free_list);
+ isolate_pcp_pages(pcp->count, pcp, &dst);
count = pcp->count;
pcp->count = 0;
unlock_cpu_pcp(flags, cpu);
- free_pages_bulk(zone, count, &free_list, 0);
+ free_pcppages_bulk(zone, count, &dst);
}
}
@@ -1222,8 +1236,8 @@ void free_hot_cold_page(struct page *page, int cold)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
unsigned long flags;
- int migratetype;
- int count, this_cpu, wasMlocked = __TestClearPageMlocked(page);
+ int migratetype, this_cpu, count;
+ int wasMlocked = __TestClearPageMlocked(page);
trace_mm_page_free_direct(page, 0);
kmemcheck_free_shadow(page, 0);
@@ -1269,16 +1283,15 @@ void free_hot_cold_page(struct page *page, int cold)
list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- LIST_HEAD(free_list);
+ struct per_cpu_pages dst;
- isolate_pcp_pages(pcp->batch, &pcp->list, &free_list);
+ isolate_pcp_pages(pcp->batch, pcp, &dst);
pcp->count -= pcp->batch;
count = pcp->batch;
put_zone_pcp(zone, flags, this_cpu);
- free_pages_bulk(zone, count, &free_list, 0);
+ free_pcppages_bulk(zone, count, &dst);
return;
}
-
out:
put_zone_pcp(zone, flags, this_cpu);
}
@@ -1331,11 +1344,10 @@ again:
pset = get_zone_pcp(zone, &flags, &this_cpu);
if (likely(order == 0)) {
- struct list_head *list;
struct per_cpu_pages *pcp = &pset->pcp;
+ struct list_head *list;
list = &pcp->lists[migratetype];
-
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
--
1.7.1.1