From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: mm: page_alloc: rt-friendly per-cpu pages

Convert the irqs-off per-cpu locking method into a preemptible,
explicit per-cpu locks method: the page allocator's per-cpu paths now
take a local lock (pa_lock) instead of disabling interrupts, which
keeps them preemptible on PREEMPT_RT while mapping back to plain
local_irq_save()/local_irq_restore() on !RT.

Contains fixes from:
Peter Zijlstra <a.p.zijlstra@chello.nl>
Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/page_alloc.c | 57 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 41 insertions(+), 16 deletions(-)
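
Note (not part of the original changelog): pa_lock is a "local lock"
from the -rt tree's <linux/locallock.h>. The sketch below is a
simplified, from-memory reconstruction of how that header behaved at
the time -- owner tracking, recursion handling and the migrate-disable
bookkeeping are omitted, so treat the exact names and expansions as
approximate rather than authoritative:

	/* Simplified sketch of <linux/locallock.h>, details approximate */
	struct local_irq_lock {
		spinlock_t lock;	/* a sleeping, rtmutex-based lock on RT */
	};

	#define DEFINE_LOCAL_IRQ_LOCK(lvar)				\
		DEFINE_PER_CPU(struct local_irq_lock, lvar) = {		\
			.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }

	#ifdef CONFIG_PREEMPT_RT_BASE
	/*
	 * RT: take this CPU's instance of the lock.  spin_lock() maps
	 * to an rtmutex here, so the critical section stays
	 * preemptible, interrupts stay enabled, and the task cannot
	 * migrate while the lock is held -- which is what keeps the
	 * per-cpu data consistent.
	 */
	# define local_lock_irqsave(lvar, flags)			\
		do {							\
			(void)(flags);					\
			spin_lock(&this_cpu_ptr(&(lvar))->lock);	\
		} while (0)
	# define local_unlock_irqrestore(lvar, flags)			\
		spin_unlock(&this_cpu_ptr(&(lvar))->lock)
	#else
	/*
	 * !RT: exactly the old behaviour -- interrupts off on the
	 * local CPU; the lock variable itself is unused.
	 */
	# define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
	# define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
	#endif

local_spin_lock_irqsave(pa_lock, &zone->lock, flags), used in
buffered_rmqueue() below, nests the two on RT (pa_lock first, then
zone->lock) and collapses to plain spin_lock_irqsave(&zone->lock,
flags) on !RT; local_irq_lock_init(pa_lock) initializes each CPU's
instance. This also explains why the spin_unlock(&zone->lock) in
buffered_rmqueue() moves below __mod_zone_freepage_state(): the
accounting used to run after the unlock but still inside the irqs-off
section, a guarantee the local lock no longer provides on RT, so the
update is now completed before the zone lock is dropped.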
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
 #include <linux/page_ext.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/locallock.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
@@ -276,6 +277,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)		\
+	spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1228,10 +1241,10 @@ static void __free_pages_ok(struct page
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2221,14 +2234,14 @@ void drain_zone_pages(struct zone *zone,
 	unsigned long flags;
 	int to_drain, batch;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
 		free_pcppages_bulk(zone, to_drain, pcp);
 		pcp->count -= to_drain;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 #endif
 
@@ -2245,7 +2258,7 @@ static void drain_pages_zone(unsigned in
 	struct per_cpu_pageset *pset;
 	struct per_cpu_pages *pcp;
 
-	local_irq_save(flags);
+	cpu_lock_irqsave(cpu, flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
@@ -2253,7 +2266,7 @@ static void drain_pages_zone(unsigned in
 		free_pcppages_bulk(zone, pcp->count, pcp);
 		pcp->count = 0;
 	}
-	local_irq_restore(flags);
+	cpu_unlock_irqrestore(cpu, flags);
 }
 
 /*
@@ -2339,8 +2352,17 @@ void drain_all_pages(struct zone *zone)
 		else
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
+#ifndef CONFIG_PREEMPT_RT_BASE
 	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
 								zone, 1);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps) {
+		if (zone)
+			drain_pages_zone(cpu, zone);
+		else
+			drain_pages(cpu);
+	}
+#endif
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2400,7 +2422,7 @@ void free_hot_cold_page(struct page *pag
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_event(PGFREE);
 
 	/*
@@ -2431,7 +2453,7 @@ void free_hot_cold_page(struct page *pag
 	}
 
 out:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
@@ -2568,7 +2590,7 @@ struct page *buffered_rmqueue(struct zon
 		struct per_cpu_pages *pcp;
 		struct list_head *list;
 
-		local_irq_save(flags);
+		local_lock_irqsave(pa_lock, flags);
 		do {
 			pcp = &this_cpu_ptr(zone->pageset)->pcp;
 			list = &pcp->lists[migratetype];
@@ -2595,7 +2617,7 @@ struct page *buffered_rmqueue(struct zon
 		 * allocate greater than order-1 page units with __GFP_NOFAIL.
 		 */
 		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-		spin_lock_irqsave(&zone->lock, flags);
+		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
 
 		do {
 			page = NULL;
@@ -2607,22 +2629,24 @@ struct page *buffered_rmqueue(struct zon
 			if (!page)
 				page = __rmqueue(zone, order, migratetype);
 		} while (page && check_new_pages(page, order));
-		spin_unlock(&zone->lock);
-		if (!page)
+		if (!page) {
+			spin_unlock(&zone->lock);
 			goto failed;
+		}
 		__mod_zone_freepage_state(zone, -(1 << order),
 					  get_pcppage_migratetype(page));
+		spin_unlock(&zone->lock);
 	}
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	return NULL;
 }
 
@@ -6564,6 +6588,7 @@ static int page_alloc_cpu_notify(struct
 void __init page_alloc_init(void)
 {
 	hotcpu_notifier(page_alloc_cpu_notify, 0);
+	local_irq_lock_init(pa_lock);
 }
 
 /*
@@ -7380,7 +7405,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages() */
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
@@ -7389,7 +7414,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
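
Note (not part of the original changelog): the drain_all_pages() hunk
changes behaviour as well as locking, so an annotated restatement of
that hunk follows. The code mirrors the patch; only the comments are
new, and the rationale is inferred from the locking scheme rather than
taken from the original submission:

	#ifndef CONFIG_PREEMPT_RT_BASE
		/*
		 * !RT: the pcp lists are protected only by "interrupts
		 * off on the owning CPU", so a remote CPU must not touch
		 * them directly; send an IPI and let each target CPU
		 * drain its own lists.
		 */
		on_each_cpu_mask(&cpus_with_pcps,
				 (smp_call_func_t) drain_local_pages, zone, 1);
	#else
		/*
		 * RT: an IPI handler runs in hard interrupt context,
		 * where the sleeping pa_lock could not be taken.  It is
		 * also not needed: drain_pages_zone()/drain_pages() now
		 * serialize against the owning CPU via
		 * cpu_lock_irqsave(), so the remote lists can be drained
		 * in place, preemptibly.
		 */
		for_each_cpu(cpu, &cpus_with_pcps) {
			if (zone)
				drain_pages_zone(cpu, zone);
			else
				drain_pages(cpu);
		}
	#endif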