From 3c397d01a8c4299371f50192496ff1cd1fa8e552 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 3 Jul 2009 08:44:05 -0500
Subject: [PATCH] mm: quicklist: Convert to percpu locked
commit 1578a2b7d1300f4e27cea087e6cdce9b8fbbcb4a in tip.
Use the per-cpu locked variant for quicklists as well to make the code
preemptible.
[ tglx: folded Ingo's "release before free page fix" ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
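Note for readers unfamiliar with the -rt per-CPU locked primitives: the
sketch below is a minimal userspace analogy, not kernel code. It only
illustrates the shape of the change: each quicklist slot gets its own
lock, get_cpu_var_locked()/put_cpu_var_locked() become a lock/unlock
pair around that slot instead of a preempt-disable section, and the
lock-free __quicklist_alloc() helper is factored out so quicklist_trim()
can reuse it while it already holds the slot lock. Every identifier in
the sketch (fake_quicklist, ql_alloc, NR_FAKE_CPUS, ...) is invented for
illustration, with pthreads standing in for the kernel API.

/*
 * Userspace analogy of the per-CPU locked quicklist pattern.
 * NOT kernel code; pthread mutexes stand in for the per-CPU locks.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_FAKE_CPUS 4

struct fake_quicklist {
	void **page;		/* singly linked free list, next pointer in p[0] */
	int nr_pages;
	pthread_mutex_t lock;	/* plays the role of the per-CPU lock */
};

static struct fake_quicklist ql[NR_FAKE_CPUS];

/* Analogue of __quicklist_alloc(): caller already holds q->lock. */
static void *__ql_alloc(struct fake_quicklist *q)
{
	void **p = q->page;

	if (p) {
		q->page = p[0];
		p[0] = NULL;
		q->nr_pages--;
	}
	return p;
}

/* Analogue of quicklist_alloc(): lock the slot, try the cache, unlock. */
static void *ql_alloc(int cpu)
{
	struct fake_quicklist *q = &ql[cpu];
	void *p;

	pthread_mutex_lock(&q->lock);	/* get_cpu_var_locked() */
	p = __ql_alloc(q);
	pthread_mutex_unlock(&q->lock);	/* put_cpu_var_locked() */

	if (p)
		return p;
	return calloc(1, 4096);		/* slow path: fall back to the "page allocator" */
}

/* Analogue of __quicklist_free(): push the page back onto the slot's list. */
static void ql_free(int cpu, void *p)
{
	struct fake_quicklist *q = &ql[cpu];

	pthread_mutex_lock(&q->lock);
	*(void **)p = q->page;
	q->page = p;
	q->nr_pages++;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_FAKE_CPUS; i++)
		pthread_mutex_init(&ql[i].lock, NULL);

	void *a = ql_alloc(0);	/* slow path, the list is empty */
	ql_free(0, a);		/* page goes onto slot 0's list */
	void *b = ql_alloc(0);	/* fast path, reuses the cached page */

	printf("reused cached page: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}

Splitting out __ql_alloc() mirrors why the patch introduces
__quicklist_alloc(): quicklist_trim() already holds the per-CPU lock
when it drains pages, so it must call a helper that does not take the
lock again, rather than the public quicklist_alloc().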
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index bd46643..1bc3d46 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -18,7 +18,7 @@ struct quicklist {
int nr_pages;
};
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DECLARE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK];
/*
* The two key functions quicklist_alloc and quicklist_free are inline so
@@ -30,19 +30,27 @@ DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
* The fast patch in quicklist_alloc touched only a per cpu cacheline and
* the first cacheline of the page itself. There is minmal overhead involved.
*/
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+static inline void *__quicklist_alloc(struct quicklist *q)
{
- struct quicklist *q;
- void **p = NULL;
+ void **p = q->page;
- q =&get_cpu_var(quicklist)[nr];
- p = q->page;
if (likely(p)) {
q->page = p[0];
p[0] = NULL;
q->nr_pages--;
}
- put_cpu_var(quicklist);
+ return p;
+}
+
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+ struct quicklist *q;
+ void **p;
+ int cpu;
+
+ q = &get_cpu_var_locked(quicklist, &cpu)[nr];
+ p = __quicklist_alloc(q);
+ put_cpu_var_locked(quicklist, cpu);
if (likely(p))
return p;
@@ -56,12 +64,13 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
struct page *page)
{
struct quicklist *q;
+ int cpu;
- q = &get_cpu_var(quicklist)[nr];
+ q = &get_cpu_var_locked(quicklist, &cpu)[nr];
*(void **)p = q->page;
q->page = p;
q->nr_pages++;
- put_cpu_var(quicklist);
+ put_cpu_var_locked(quicklist, cpu);
}
static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 6633965..b6fb023 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/quicklist.h>
-DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
+DEFINE_PER_CPU_LOCKED(struct quicklist [CONFIG_NR_QUICK], quicklist);
#define FRACTION_OF_NODE_MEM 16
@@ -65,17 +65,14 @@ void quicklist_trim(int nr, void (*dtor)(void *),
{
long pages_to_free;
struct quicklist *q;
+ int cpu;
- q = &get_cpu_var(quicklist)[nr];
+ q = &get_cpu_var_locked(quicklist, &cpu)[nr];
if (q->nr_pages > min_pages) {
pages_to_free = min_pages_to_free(q, min_pages, max_free);
while (pages_to_free > 0) {
- /*
- * We pass a gfp_t of 0 to quicklist_alloc here
- * because we will never call into the page allocator.
- */
- void *p = quicklist_alloc(nr, 0, NULL);
+ void *p = __quicklist_alloc(q);
if (dtor)
dtor(p);
@@ -83,7 +80,7 @@ void quicklist_trim(int nr, void (*dtor)(void *),
pages_to_free--;
}
}
- put_cpu_var(quicklist);
+ put_cpu_var_locked(quicklist, cpu);
}
unsigned long quicklist_total_size(void)
@@ -93,7 +90,7 @@ unsigned long quicklist_total_size(void)
struct quicklist *ql, *q;
for_each_online_cpu(cpu) {
- ql = per_cpu(quicklist, cpu);
+ ql = per_cpu_var_locked(quicklist, cpu);
for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
count += q->nr_pages;
}
--
1.7.1.1