| From: Ingo Molnar <mingo@elte.hu> |
| Date: Fri, 3 Jul 2009 08:29:51 -0500 |
| Subject: mm/swap: Convert to percpu locked |
| |
| Replace the open-coded preempt/irq disabling (get_cpu() + local_irq_save()) |
| with "local_locks()". Currently there is one lock for "rotate" and one for |
| "swap". |
| |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| --- |
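| The local_lock_*() primitives used below are provided by the RT |
| patchset's linux/locallock.h, and get_cpu_light()/put_cpu_light() by its |
| scheduler patches. As a rough sketch of their semantics (simplified |
| here; the real headers also handle lockdep, nesting and flags |
| bookkeeping): |
| |
|   #ifdef CONFIG_PREEMPT_RT_BASE |
|   /* RT: lvar names a per-CPU spinlock; the section stays preemptible */ |
|   # define local_lock_irqsave(lvar, flags) \ |
|       do { (void)(flags); migrate_disable(); \ |
|            spin_lock(&this_cpu_ptr(&(lvar))->lock); } while (0) |
|   # define get_locked_var(lvar, var) \ |
|       (*({ local_lock(lvar); this_cpu_ptr(&(var)); })) |
|   # define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) |
|   #else |
|   /* !RT: collapses to the old primitives; no behavioural change */ |
|   # define local_lock_irqsave(lvar, flags)   local_irq_save(flags) |
|   # define get_locked_var(lvar, var)         get_cpu_var(var) |
|   # define put_locked_var(lvar, var)         put_cpu_var(var) |
|   # define get_cpu_light()                   get_cpu() |
|   #endif |
| |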
| include/linux/swap.h | 1 + |
| mm/compaction.c | 6 ++++-- |
| mm/page_alloc.c | 2 ++ |
| mm/swap.c | 39 +++++++++++++++++++++++---------------- |
| 4 files changed, 30 insertions(+), 18 deletions(-) |
| |
| --- a/include/linux/swap.h |
| +++ b/include/linux/swap.h |
| @@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_p |
| |
| |
| /* linux/mm/swap.c */ |
| +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); |
| extern void lru_cache_add(struct page *); |
| extern void lru_cache_add_anon(struct page *page); |
| extern void lru_cache_add_file(struct page *page); |
| --- a/mm/compaction.c |
| +++ b/mm/compaction.c |
| @@ -1409,10 +1409,12 @@ static int compact_zone(struct zone *zon |
| cc->migrate_pfn & ~((1UL << cc->order) - 1); |
| |
| if (cc->last_migrated_pfn < current_block_start) { |
| - cpu = get_cpu(); |
| + cpu = get_cpu_light(); |
| + local_lock_irq(swapvec_lock); |
| lru_add_drain_cpu(cpu); |
| + local_unlock_irq(swapvec_lock); |
| drain_local_pages(zone); |
| - put_cpu(); |
| + put_cpu_light(); |
| /* No more flushing until we migrate again */ |
| cc->last_migrated_pfn = 0; |
| } |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -6276,7 +6276,9 @@ static int page_alloc_cpu_notify(struct |
| int cpu = (unsigned long)hcpu; |
| |
| if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { |
| + local_lock_irq_on(swapvec_lock, cpu); |
| lru_add_drain_cpu(cpu); |
| + local_unlock_irq_on(swapvec_lock, cpu); |
| drain_pages(cpu); |
| |
| /* |
| --- a/mm/swap.c |
| +++ b/mm/swap.c |
| @@ -32,6 +32,7 @@ |
| #include <linux/memcontrol.h> |
| #include <linux/gfp.h> |
| #include <linux/uio.h> |
| +#include <linux/locallock.h> |
| #include <linux/hugetlb.h> |
| #include <linux/page_idle.h> |
| |
| @@ -48,6 +49,9 @@ static DEFINE_PER_CPU(struct pagevec, lr |
| static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); |
| static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); |
| |
| +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); |
| +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); |
| + |
| /* |
| * This path almost never happens for VM activity - pages are normally |
| * freed via pagevecs. But it gets used by networking. |
| @@ -237,11 +241,11 @@ void rotate_reclaimable_page(struct page |
| unsigned long flags; |
| |
| get_page(page); |
| - local_irq_save(flags); |
| + local_lock_irqsave(rotate_lock, flags); |
| pvec = this_cpu_ptr(&lru_rotate_pvecs); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_move_tail(pvec); |
| - local_irq_restore(flags); |
| + local_unlock_irqrestore(rotate_lock, flags); |
| } |
| } |
| |
| @@ -292,12 +296,13 @@ static bool need_activate_page_drain(int |
| void activate_page(struct page *page) |
| { |
| if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { |
| - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + activate_page_pvecs); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, __activate_page, NULL); |
| - put_cpu_var(activate_page_pvecs); |
| + put_locked_var(swapvec_lock, activate_page_pvecs); |
| } |
| } |
| |
| @@ -323,7 +328,7 @@ void activate_page(struct page *page) |
| |
| static void __lru_cache_activate_page(struct page *page) |
| { |
| - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); |
| int i; |
| |
| /* |
| @@ -345,7 +350,7 @@ static void __lru_cache_activate_page(st |
| } |
| } |
| |
| - put_cpu_var(lru_add_pvec); |
| + put_locked_var(swapvec_lock, lru_add_pvec); |
| } |
| |
| /* |
| @@ -387,12 +392,12 @@ EXPORT_SYMBOL(mark_page_accessed); |
| |
| static void __lru_cache_add(struct page *page) |
| { |
| - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| __pagevec_lru_add(pvec); |
| - put_cpu_var(lru_add_pvec); |
| + put_locked_var(swapvec_lock, lru_add_pvec); |
| } |
| |
| /** |
| @@ -590,9 +595,9 @@ void lru_add_drain_cpu(int cpu) |
| unsigned long flags; |
| |
| /* No harm done if a racing interrupt already did this */ |
| - local_irq_save(flags); |
| + local_lock_irqsave(rotate_lock, flags); |
| pagevec_move_tail(pvec); |
| - local_irq_restore(flags); |
| + local_unlock_irqrestore(rotate_lock, flags); |
| } |
| |
| pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); |
| @@ -624,11 +629,12 @@ void deactivate_file_page(struct page *p |
| return; |
| |
| if (likely(get_page_unless_zero(page))) { |
| - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + lru_deactivate_file_pvecs); |
| |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); |
| - put_cpu_var(lru_deactivate_file_pvecs); |
| + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); |
| } |
| } |
| |
| @@ -643,19 +649,20 @@ void deactivate_file_page(struct page *p |
| void deactivate_page(struct page *page) |
| { |
| if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { |
| - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + lru_deactivate_pvecs); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); |
| - put_cpu_var(lru_deactivate_pvecs); |
| + put_locked_var(swapvec_lock, lru_deactivate_pvecs); |
| } |
| } |
| |
| void lru_add_drain(void) |
| { |
| - lru_add_drain_cpu(get_cpu()); |
| - put_cpu(); |
| + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); |
| + local_unlock_cpu(swapvec_lock); |
| } |
| |
| static void lru_add_drain_per_cpu(struct work_struct *dummy) |