Subject: mm-slab-wrap-functions.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 18 Jun 2011 19:44:43 +0200
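
Protect the slab per-CPU array caches and the kmem_cache_node list
locks with a per-CPU local lock (slab_lock) instead of plain
local_irq_*()/spin_lock_irq*(), so the critical sections stay
preemptible on PREEMPT_RT. The on_each_cpu() callbacks do_drain() and
do_ccupdate_local() are split into helpers (__do_drain(),
__do_ccupdate_local()) that take an explicit CPU argument; on RT,
slab_on_each_cpu() calls them for every online CPU from the calling
CPU instead of sending IPIs.

The conversion pattern, as a rough sketch (the locallock calls are the
ones used in the diff below; work_on_percpu_cache() is a made-up
placeholder for the protected slab work):

	static DEFINE_LOCAL_IRQ_LOCK(slab_lock);

	/* before: per-CPU exclusion relies on hard IRQ disabling */
	local_irq_save(flags);
	work_on_percpu_cache();
	local_irq_restore(flags);

	/* after: on !RT this maps back to local_irq_save()/restore();
	 * on RT it takes the per-CPU slab_lock and the section stays
	 * preemptible */
	local_lock_irqsave(slab_lock, flags);
	work_on_percpu_cache();
	local_unlock_irqrestore(slab_lock, flags);
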
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/slab.c | 157 ++++++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 108 insertions(+), 49 deletions(-)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
+#include <linux/locallock.h>
#include <net/sock.h>
@@ -633,6 +634,37 @@ static void slab_set_debugobj_lock_class
#endif
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1)
+#else
+/*
+ * Execute func() for all CPUs. On PREEMPT_RT we don't actually have
+ * to run on the remote CPUs - we only have to take their CPU-locks.
+ * (This is a rare operation, so cacheline bouncing is not an issue.)
+ */
+static void
+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+{
+ unsigned int i;
+
+ get_cpu_light();
+ for_each_online_cpu(i)
+ func(arg, i);
+ put_cpu_light();
+}
+
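+/* RT: take/drop a remote CPU's slab_lock from the current CPU. */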
+static void lock_slab_on(unsigned int cpu)
+{
+ local_lock_irq_on(slab_lock, cpu);
+}
+
+static void unlock_slab_on(unsigned int cpu)
+{
+ local_unlock_irq_on(slab_lock, cpu);
+}
+#endif
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
@@ -1073,9 +1105,10 @@ static void reap_alien(struct kmem_cache
if (n->alien) {
struct array_cache *ac = n->alien[node];
- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+ if (ac && ac->avail &&
+ local_spin_trylock_irq(slab_lock, &ac->lock)) {
__drain_alien_cache(cachep, ac, node);
- spin_unlock_irq(&ac->lock);
+ local_spin_unlock_irq(slab_lock, &ac->lock);
}
}
}
@@ -1090,9 +1123,9 @@ static void drain_alien_cache(struct kme
for_each_online_node(i) {
ac = alien[i];
if (ac) {
- spin_lock_irqsave(&ac->lock, flags);
+ local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
- spin_unlock_irqrestore(&ac->lock, flags);
+ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
}
}
}
@@ -1171,11 +1204,11 @@ static int init_cache_node_node(int node
cachep->node[node] = n;
}
- spin_lock_irq(&cachep->node[node]->list_lock);
+ local_spin_lock_irq(slab_lock, &cachep->node[node]->list_lock);
cachep->node[node]->free_limit =
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
- spin_unlock_irq(&cachep->node[node]->list_lock);
+ local_spin_unlock_irq(slab_lock, &cachep->node[node]->list_lock);
}
return 0;
}
@@ -1200,7 +1233,7 @@ static void __cpuinit cpuup_canceled(lon
if (!n)
goto free_array_cache;
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
@@ -1208,7 +1241,7 @@ static void __cpuinit cpuup_canceled(lon
free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
goto free_array_cache;
}
@@ -1222,7 +1255,7 @@ static void __cpuinit cpuup_canceled(lon
alien = n->alien;
n->alien = NULL;
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
kfree(shared);
if (alien) {
@@ -1296,7 +1329,7 @@ static int __cpuinit cpuup_prepare(long
n = cachep->node[node];
BUG_ON(!n);
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
if (!n->shared) {
/*
* We are serialised from CPU_DEAD or
@@ -1311,7 +1344,7 @@ static int __cpuinit cpuup_prepare(long
alien = NULL;
}
#endif
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
kfree(shared);
free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1512,6 +1545,8 @@ void __init kmem_cache_init(void)
if (num_possible_nodes() == 1)
use_alien_caches = 0;
+ local_irq_lock_init(slab_lock);
+
for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_cache_node_init(&init_kmem_cache_node[i]);
@@ -2408,7 +2443,7 @@ __kmem_cache_create (struct kmem_cache *
#if DEBUG
static void check_irq_off(void)
{
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
}
static void check_irq_on(void)
@@ -2443,26 +2478,37 @@ static void drain_array(struct kmem_cach
struct array_cache *ac,
int force, int node);
-static void do_drain(void *arg)
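+/* Flush @cpu's array_cache for @cachep back to the node lists and mark it empty. */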
+static void __do_drain(void *arg, unsigned int cpu)
{
struct kmem_cache *cachep = arg;
struct array_cache *ac;
- int node = numa_mem_id();
+ int node = cpu_to_mem(cpu);
- check_irq_off();
- ac = cpu_cache_get(cachep);
+ ac = cpu_cache_get_on_cpu(cachep, cpu);
spin_lock(&cachep->node[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
spin_unlock(&cachep->node[node]->list_lock);
ac->avail = 0;
}
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_drain(void *arg)
+{
+ __do_drain(arg, smp_processor_id());
+}
+#else
+static void do_drain(void *arg, int this_cpu)
+{
+ __do_drain(arg, this_cpu);
+}
+#endif
+
static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_cache_node *n;
int node;
- on_each_cpu(do_drain, cachep, 1);
+ slab_on_each_cpu(do_drain, cachep);
check_irq_on();
for_each_online_node(node) {
n = cachep->node[node];
@@ -2493,10 +2539,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
p = n->slabs_free.prev;
if (p == &n->slabs_free) {
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
goto out;
}
@@ -2510,7 +2556,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
n->free_objects -= cache->num;
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
slab_destroy(cache, slabp);
nr_freed++;
}
@@ -2785,7 +2831,7 @@ static int cache_grow(struct kmem_cache
offset *= cachep->colour_off;
if (local_flags & __GFP_WAIT)
- local_irq_enable();
+ local_unlock_irq(slab_lock);
/*
* The test for missing atomic flag is performed here, rather than
@@ -2815,7 +2861,7 @@ static int cache_grow(struct kmem_cache
cache_init_objs(cachep, slabp);
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
check_irq_off();
spin_lock(&n->list_lock);
@@ -2829,7 +2875,7 @@ opps1:
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
return 0;
}
@@ -3243,11 +3289,11 @@ retry:
* set and go into memory reserves if necessary.
*/
if (local_flags & __GFP_WAIT)
- local_irq_enable();
+ local_unlock_irq(slab_lock);
kmem_flagcheck(cache, flags);
obj = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
if (obj) {
/*
* Insert into the appropriate per node queues
@@ -3368,7 +3414,7 @@ slab_alloc_node(struct kmem_cache *cache
cachep = memcg_kmem_get_cache(cachep, flags);
cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
+ local_lock_irqsave(slab_lock, save_flags);
if (nodeid == NUMA_NO_NODE)
nodeid = slab_node;
@@ -3393,7 +3439,7 @@ slab_alloc_node(struct kmem_cache *cache
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
- local_irq_restore(save_flags);
+ local_unlock_irqrestore(slab_lock, save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
flags);
@@ -3455,9 +3501,9 @@ slab_alloc(struct kmem_cache *cachep, gf
cachep = memcg_kmem_get_cache(cachep, flags);
cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
+ local_lock_irqsave(slab_lock, save_flags);
objp = __do_cache_alloc(cachep, flags);
- local_irq_restore(save_flags);
+ local_unlock_irqrestore(slab_lock, save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
flags);
@@ -3774,9 +3820,9 @@ void kmem_cache_free(struct kmem_cache *
debug_check_no_locks_freed(objp, cachep->object_size);
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, cachep->object_size);
- local_irq_save(flags);
+ local_lock_irqsave(slab_lock, flags);
__cache_free(cachep, objp, _RET_IP_);
- local_irq_restore(flags);
+ local_unlock_irqrestore(slab_lock, flags);
trace_kmem_cache_free(_RET_IP_, objp);
}
@@ -3805,9 +3851,9 @@ void kfree(const void *objp)
debug_check_no_locks_freed(objp, c->object_size);
debug_check_no_obj_freed(objp, c->object_size);
- local_irq_save(flags);
+ local_lock_irqsave(slab_lock, flags);
__cache_free(c, (void *)objp, _RET_IP_);
- local_irq_restore(flags);
+ local_unlock_irqrestore(slab_lock, flags);
}
EXPORT_SYMBOL(kfree);
@@ -3844,7 +3890,7 @@ static int alloc_kmemlist(struct kmem_ca
if (n) {
struct array_cache *shared = n->shared;
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
if (shared)
free_block(cachep, shared->entry,
@@ -3857,7 +3903,7 @@ static int alloc_kmemlist(struct kmem_ca
}
n->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
kfree(shared);
free_alien_cache(new_alien);
continue;
@@ -3904,17 +3950,28 @@ struct ccupdate_struct {
struct array_cache *new[0];
};
-static void do_ccupdate_local(void *info)
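+/* Install the new array_cache for @cpu and hand the old one back through @info. */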
+static void __do_ccupdate_local(void *info, int cpu)
{
struct ccupdate_struct *new = info;
struct array_cache *old;
- check_irq_off();
- old = cpu_cache_get(new->cachep);
+ old = cpu_cache_get_on_cpu(new->cachep, cpu);
+
+ new->cachep->array[cpu] = new->new[cpu];
+ new->new[cpu] = old;
+}
- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
- new->new[smp_processor_id()] = old;
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_ccupdate_local(void *info)
+{
+ __do_ccupdate_local(info, smp_processor_id());
}
+#else
+static void do_ccupdate_local(void *info, int cpu)
+{
+ __do_ccupdate_local(info, cpu);
+}
+#endif
/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -3940,7 +3997,7 @@ static int __do_tune_cpucache(struct kme
}
new->cachep = cachep;
- on_each_cpu(do_ccupdate_local, (void *)new, 1);
+ slab_on_each_cpu(do_ccupdate_local, (void *)new);
check_irq_on();
cachep->batchcount = batchcount;
@@ -3951,9 +4008,11 @@ static int __do_tune_cpucache(struct kme
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
- spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
+ local_spin_lock_irq(slab_lock,
+ &cachep->node[cpu_to_mem(i)]->list_lock);
free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
- spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
+ local_spin_unlock_irq(slab_lock,
+ &cachep->node[cpu_to_mem(i)]->list_lock);
kfree(ccold);
}
kfree(new);
@@ -4068,7 +4127,7 @@ static void drain_array(struct kmem_cach
if (ac->touched && !force) {
ac->touched = 0;
} else {
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail)
@@ -4078,7 +4137,7 @@ static void drain_array(struct kmem_cach
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void *) * ac->avail);
}
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
}
}
@@ -4171,7 +4230,7 @@ void get_slabinfo(struct kmem_cache *cac
continue;
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
list_for_each_entry(slabp, &n->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
@@ -4196,7 +4255,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -4396,13 +4455,13 @@ static int leaks_show(struct seq_file *m
continue;
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ local_spin_lock_irq(slab_lock, &n->list_lock);
list_for_each_entry(slabp, &n->slabs_full, list)
handle_slab(x, cachep, slabp);
list_for_each_entry(slabp, &n->slabs_partial, list)
handle_slab(x, cachep, slabp);
- spin_unlock_irq(&n->list_lock);
+ local_spin_unlock_irq(slab_lock, &n->list_lock);
}
name = cachep->name;
if (x[0] == x[1]) {