Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100

Make SLUB RT aware by converting the kmem_cache_node list_lock to a raw
spinlock and by queueing slabs to be freed on per-CPU free lists, so that
the actual freeing is moved out of the lock-held, IRQ-disabled region.
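
The deferred-free scheme, condensed here for illustration only:
queue_deferred_free() is a made-up name for the queueing that free_slab()
open-codes in the diff below, while struct slub_free_list and
free_delayed() are taken from the patch as-is.

  struct slub_free_list {
          raw_spinlock_t   lock;
          struct list_head list;
  };
  static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);

  /* IRQs off: queue the slab page instead of freeing it here */
  static void queue_deferred_free(struct page *page)
  {
          struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

          raw_spin_lock(&f->lock);
          list_add(&page->lru, &f->list);
          raw_spin_unlock(&f->lock);
  }

  /* IRQs on again: hand the queued slabs to __free_slab() */
  static void free_delayed(struct list_head *h)
  {
          while (!list_empty(h)) {
                  struct page *page = list_first_entry(h, struct page, lru);

                  list_del(&page->lru);
                  __free_slab(page->slab_cache, page);
          }
  }

Callers splice the per-CPU list onto a local list_head while interrupts are
still disabled and run free_delayed() on it right after local_irq_restore()
or local_irq_enable(), as done in __slab_alloc(), put_cpu_partial(),
flush_all() and kmem_cache_alloc_bulk().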

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/slab.h | 4 +
mm/slub.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
2 files changed, 102 insertions(+), 27 deletions(-)

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -324,7 +324,11 @@ static inline struct kmem_cache *cache_f
* The slab lists for all objects.
*/
struct kmem_cache_node {
+#ifdef CONFIG_SLUB
+ raw_spinlock_t list_lock;
+#else
spinlock_t list_lock;
+#endif

#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1075,7 +1075,7 @@ static noinline struct kmem_cache_node *
void *object = head;
int cnt = 0;

- spin_lock_irqsave(&n->list_lock, *flags);
+ raw_spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);

if (!check_slab(s, page))
@@ -1136,7 +1136,7 @@ static noinline struct kmem_cache_node *

fail:
slab_unlock(page);
- spin_unlock_irqrestore(&n->list_lock, *flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, *flags);
slab_fix(s, "Object at 0x%p not freed", object);
return NULL;
}
@@ -1263,6 +1263,12 @@ static inline void dec_slabs_node(struct

#endif /* CONFIG_SLUB_DEBUG */

+struct slub_free_list {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
@@ -1402,7 +1408,11 @@ static struct page *allocate_slab(struct

flags &= gfp_allowed_mask;

+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
if (gfpflags_allow_blocking(flags))
+#endif
local_irq_enable();

flags |= s->allocflags;
@@ -1473,7 +1483,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;

out:
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
if (gfpflags_allow_blocking(flags))
+#endif
local_irq_disable();
if (!page)
return NULL;
@@ -1529,6 +1543,16 @@ static void __free_slab(struct kmem_cach
__free_kmem_pages(page, order);
}

+static void free_delayed(struct list_head *h)
+{
+ while(!list_empty(h)) {
+ struct page *page = list_first_entry(h, struct page, lru);
+
+ list_del(&page->lru);
+ __free_slab(page->slab_cache, page);
+ }
+}
+
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))

@@ -1560,6 +1584,12 @@ static void free_slab(struct kmem_cache
}

call_rcu(head, rcu_free_slab);
+ } else if (irqs_disabled()) {
+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+ raw_spin_lock(&f->lock);
+ list_add(&page->lru, &f->list);
+ raw_spin_unlock(&f->lock);
} else
__free_slab(s, page);
}
@@ -1673,7 +1703,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;

- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;

@@ -1698,7 +1728,7 @@ static void *get_partial_node(struct kme
break;

}
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return object;
}

@@ -1944,7 +1974,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
} else {
m = M_FULL;
@@ -1955,7 +1985,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
}

@@ -1990,7 +2020,7 @@ static void deactivate_slab(struct kmem_
goto redo;

if (lock)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);

if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
@@ -2022,10 +2052,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);

n = n2;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}

do {
@@ -2054,7 +2084,7 @@ static void unfreeze_partials(struct kme
}

if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);

while (discard_page) {
page = discard_page;
@@ -2093,14 +2123,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
+ struct slub_free_list *f;
unsigned long flags;
+ LIST_HEAD(tofree);
/*
* partial array is full. Move the existing
* set to the per node partial list.
*/
local_irq_save(flags);
unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+ f = this_cpu_ptr(&slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock(&f->lock);
local_irq_restore(flags);
+ free_delayed(&tofree);
oldpage = NULL;
pobjects = 0;
pages = 0;
@@ -2172,7 +2209,22 @@ static bool has_cpu_slab(int cpu, void *

static void flush_all(struct kmem_cache *s)
{
+ LIST_HEAD(tofree);
+ int cpu;
+
on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+ for_each_online_cpu(cpu) {
+ struct slub_free_list *f;
+
+ if (!has_cpu_slab(cpu, s))
+ continue;
+
+ f = &per_cpu(slub_free_list, cpu);
+ raw_spin_lock_irq(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock_irq(&f->lock);
+ free_delayed(&tofree);
+ }
}

/*
@@ -2208,10 +2260,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;

- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2349,8 +2401,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
+ unsigned long addr, struct kmem_cache_cpu *c,
+ struct list_head *to_free)
{
+ struct slub_free_list *f;
void *freelist;
struct page *page;

@@ -2410,6 +2464,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
+
+out:
+ f = this_cpu_ptr(&slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_splice_init(&f->list, to_free);
+ raw_spin_unlock(&f->lock);
+
return freelist;

new_slab:
@@ -2441,7 +2502,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
- return freelist;
+ goto out;
}

/*
@@ -2453,6 +2514,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
+ LIST_HEAD(tofree);

local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -2464,8 +2526,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif

- p = ___slab_alloc(s, gfpflags, node, addr, c);
+ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
local_irq_restore(flags);
+ free_delayed(&tofree);
return p;
}

@@ -2652,7 +2715,7 @@ static void __slab_free(struct kmem_cach

do {
if (unlikely(n)) {
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
prior = page->freelist;
@@ -2684,7 +2747,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);

}
}
@@ -2726,7 +2789,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return;

slab_empty:
@@ -2741,7 +2804,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}

- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@@ -2913,6 +2976,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
+ LIST_HEAD(to_free);
int i;

/* memcg and kmem_cache debug support */
@@ -2936,7 +3000,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
- _RET_IP_, c);
+ _RET_IP_, c, &to_free);
if (unlikely(!p[i]))
goto error;

@@ -2948,6 +3012,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
+ free_delayed(&to_free);

/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
@@ -3095,7 +3160,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
- spin_lock_init(&n->list_lock);
+ raw_spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
@@ -3677,7 +3742,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);

- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);

/*
* Build lists of slabs to discard or promote.
@@ -3708,7 +3773,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);

- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);

/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
@@ -3884,6 +3949,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+ }

if (debug_guardpage_minorder())
slub_max_order = 0;
@@ -4127,7 +4198,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;

- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);

list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
@@ -4149,7 +4220,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));

out:
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}

@@ -4337,12 +4408,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;

- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc, map);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
}

for (i = 0; i < t.count; i++) {