From df30315681197e7d080af69ee7e4bf2961f8374e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 13 Oct 2009 10:05:40 -0500
Subject: [PATCH] slab: Cover the numa aliens with the per cpu locked changes

commit d99f9884403747e8668442b9ce61b28a9d6bcd4a in tip.

The teardown of the NUMA alien caches is not covered by the per-CPU
locked changes we did to slab. Fix that.
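
The !PREEMPT_RT variant below uses a GNU C statement expression so the
macro can both hand back the trylock result and record which CPU the
lock was taken on, mirroring slab_spin_lock_irq(). A minimal standalone
sketch of the same idiom (userspace, hypothetical names, a pthread
mutex standing in for the spinlock):

  #define _GNU_SOURCE		/* for sched_getcpu() */
  #include <pthread.h>
  #include <sched.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* Evaluates to the trylock result; records the CPU only on success. */
  #define trylock_record_cpu(l, cpu) \
  	({ int __l = (pthread_mutex_trylock(l) == 0); \
  	   if (__l) (cpu) = sched_getcpu(); __l; })

  int main(void)
  {
  	int cpu = -1;

  	if (trylock_record_cpu(&lock, cpu)) {
  		printf("acquired on CPU %d\n", cpu);
  		pthread_mutex_unlock(&lock);
  	}
  	return 0;
  }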

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
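
On PREEMPT_RT the trylock has to nest inside the per-CPU lock: the new
helper takes slab_irq_disable() first, tries the spinlock, and backs
out of the per-CPU lock again when the trylock fails, so a failed
attempt leaves nothing held and the lock order stays the same as in
slab_spin_lock_irq(). A userspace sketch of that acquire/back-out
pattern (hypothetical names, mutexes standing in for both locks):

  #include <pthread.h>
  #include <stdbool.h>

  /* Stand-ins: "outer" for the per-CPU slab lock, "inner" for ac->lock. */
  static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

  /* Take outer unconditionally, try inner; undo the outer acquisition
   * on failure so the caller sees an all-or-nothing result. */
  static bool trylock_nested(void)
  {
  	pthread_mutex_lock(&outer);
  	if (pthread_mutex_trylock(&inner) == 0)
  		return true;
  	pthread_mutex_unlock(&outer);
  	return false;
  }

  static void unlock_nested(void)
  {
  	pthread_mutex_unlock(&inner);
  	pthread_mutex_unlock(&outer);
  }

  int main(void)
  {
  	if (trylock_nested())
  		unlock_nested();
  	return 0;
  }
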
diff --git a/mm/slab.c b/mm/slab.c
index 4641c47..985c67b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -164,6 +164,9 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		local_irq_disable();
 }
 
+#define slab_spin_trylock_irq(lock, cpu) \
+	({ int __l = spin_trylock_irq(lock); if (__l) (cpu) = smp_processor_id(); __l; })
+
 # define slab_spin_lock_irq(lock, cpu) \
 	do { spin_lock_irq(lock); (cpu) = smp_processor_id(); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) spin_unlock_irq(lock)
@@ -241,10 +244,26 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		slab_irq_disable(*cpu);
 }
 
+static inline int _slab_spin_trylock_irq(spinlock_t *lock, int *cpu)
+{
+	int locked;
+
+	slab_irq_disable(*cpu);
+	locked = spin_trylock(lock);
+	if (!locked)
+		slab_irq_enable(*cpu);
+
+	return locked;
+}
+
+# define slab_spin_trylock_irq(lock, cpu) \
+	_slab_spin_trylock_irq((lock), &(cpu))
+
 # define slab_spin_lock_irq(lock, cpu) \
 	do { slab_irq_disable(cpu); spin_lock(lock); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) \
 	do { spin_unlock(lock); slab_irq_enable(cpu); } while (0)
+
 # define slab_spin_lock_irqsave(lock, flags, cpu) \
 	do { slab_irq_disable(cpu); spin_lock_irqsave(lock, flags); } while (0)
 # define slab_spin_unlock_irqrestore(lock, flags, cpu) \
@@ -1075,7 +1094,7 @@ static int transfer_objects(struct array_cache *to,
 #ifndef CONFIG_NUMA
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3, this_cpu) 0
+#define reap_alien(cachep, l3) 0
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
@@ -1172,17 +1191,18 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 /*
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
-static int
-reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu)
+static int reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = per_cpu(slab_reap_node, *this_cpu);
+	int node = __get_cpu_var(slab_reap_node);
+	int this_cpu;
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node, this_cpu);
-			spin_unlock_irq(&ac->lock);
+		if (ac && ac->avail &&
+		    slab_spin_trylock_irq(&ac->lock, this_cpu)) {
+			__drain_alien_cache(cachep, ac, node, &this_cpu);
+			slab_spin_unlock_irq(&ac->lock, this_cpu);
 			return 1;
 		}
 	}
@@ -4293,7 +4313,7 @@ int drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  */
 static void cache_reap(struct work_struct *w)
 {
-	int this_cpu = raw_smp_processor_id(), node = cpu_to_node(this_cpu);
+	int this_cpu = smp_processor_id(), node = cpu_to_node(this_cpu);
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	struct delayed_work *work = to_delayed_work(w);
@@ -4313,7 +4333,7 @@ static void cache_reap(struct work_struct *w)
 	 */
 	l3 = searchp->nodelists[node];
 
-	work_done += reap_alien(searchp, l3, &this_cpu);
+	work_done += reap_alien(searchp, l3);
 
 	node = cpu_to_node(this_cpu);
 
-- 
1.7.1.1
