| From 0f884125071d0f90dbd6927194f28d8ce0865325 Mon Sep 17 00:00:00 2001 |
| From: Ingo Molnar <mingo@elte.hu> |
| Date: Fri, 3 Jul 2009 08:29:51 -0500 |
| Subject: [PATCH] mm/swap: Convert to percpu locked |
| |
| Replace global locks (get_cpu + local_irq_save) with "local_locks()". |
| Currently there is one for "rotate" and one for "swap". |
| |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/include/linux/swap.h b/include/linux/swap.h |
| index 98d34df40058..6df838de7d78 100644 |
| --- a/include/linux/swap.h |
| +++ b/include/linux/swap.h |
| @@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_pages(void); |
| |
| |
| /* linux/mm/swap.c */ |
| +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); |
| extern void lru_cache_add(struct page *); |
| extern void lru_cache_add_anon(struct page *page); |
| extern void lru_cache_add_file(struct page *page); |
| diff --git a/mm/compaction.c b/mm/compaction.c |
| index 64df5fe052db..b04e2a713d55 100644 |
| --- a/mm/compaction.c |
| +++ b/mm/compaction.c |
| @@ -1585,10 +1585,12 @@ check_drain: |
| block_start_pfn(cc->migrate_pfn, cc->order); |
| |
| if (cc->last_migrated_pfn < current_block_start) { |
| - cpu = get_cpu(); |
| + cpu = get_cpu_light(); |
| + local_lock_irq(swapvec_lock); |
| lru_add_drain_cpu(cpu); |
| + local_unlock_irq(swapvec_lock); |
| drain_local_pages(zone); |
| - put_cpu(); |
| + put_cpu_light(); |
| /* No more flushing until we migrate again */ |
| cc->last_migrated_pfn = 0; |
| } |
| diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
| index 5e8e3b80f74a..9d439382bf73 100644 |
| --- a/mm/page_alloc.c |
| +++ b/mm/page_alloc.c |
| @@ -6720,7 +6720,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self, |
| int cpu = (unsigned long)hcpu; |
| |
| if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { |
| + local_lock_irq_on(swapvec_lock, cpu); |
| lru_add_drain_cpu(cpu); |
| + local_unlock_irq_on(swapvec_lock, cpu); |
| drain_pages(cpu); |
| |
| /* |
| diff --git a/mm/swap.c b/mm/swap.c |
| index 616df4ddd870..2ace7834e795 100644 |
| --- a/mm/swap.c |
| +++ b/mm/swap.c |
| @@ -32,6 +32,7 @@ |
| #include <linux/memcontrol.h> |
| #include <linux/gfp.h> |
| #include <linux/uio.h> |
| +#include <linux/locallock.h> |
| #include <linux/hugetlb.h> |
| #include <linux/page_idle.h> |
| |
| @@ -51,6 +52,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); |
| static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); |
| #endif |
| |
| +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); |
| +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); |
| + |
| /* |
| * This path almost never happens for VM activity - pages are normally |
| * freed via pagevecs. But it gets used by networking. |
| @@ -240,11 +244,11 @@ void rotate_reclaimable_page(struct page *page) |
| unsigned long flags; |
| |
| get_page(page); |
| - local_irq_save(flags); |
| + local_lock_irqsave(rotate_lock, flags); |
| pvec = this_cpu_ptr(&lru_rotate_pvecs); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_move_tail(pvec); |
| - local_irq_restore(flags); |
| + local_unlock_irqrestore(rotate_lock, flags); |
| } |
| } |
| |
| @@ -294,12 +298,13 @@ void activate_page(struct page *page) |
| { |
| page = compound_head(page); |
| if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { |
| - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + activate_page_pvecs); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, __activate_page, NULL); |
| - put_cpu_var(activate_page_pvecs); |
| + put_locked_var(swapvec_lock, activate_page_pvecs); |
| } |
| } |
| |
| @@ -326,7 +331,7 @@ void activate_page(struct page *page) |
| |
| static void __lru_cache_activate_page(struct page *page) |
| { |
| - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); |
| int i; |
| |
| /* |
| @@ -348,7 +353,7 @@ static void __lru_cache_activate_page(struct page *page) |
| } |
| } |
| |
| - put_cpu_var(lru_add_pvec); |
| + put_locked_var(swapvec_lock, lru_add_pvec); |
| } |
| |
| /* |
| @@ -390,12 +395,12 @@ EXPORT_SYMBOL(mark_page_accessed); |
| |
| static void __lru_cache_add(struct page *page) |
| { |
| - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| __pagevec_lru_add(pvec); |
| - put_cpu_var(lru_add_pvec); |
| + put_locked_var(swapvec_lock, lru_add_pvec); |
| } |
| |
| /** |
| @@ -593,9 +598,9 @@ void lru_add_drain_cpu(int cpu) |
| unsigned long flags; |
| |
| /* No harm done if a racing interrupt already did this */ |
| - local_irq_save(flags); |
| + local_lock_irqsave(rotate_lock, flags); |
| pagevec_move_tail(pvec); |
| - local_irq_restore(flags); |
| + local_unlock_irqrestore(rotate_lock, flags); |
| } |
| |
| pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); |
| @@ -627,11 +632,12 @@ void deactivate_file_page(struct page *page) |
| return; |
| |
| if (likely(get_page_unless_zero(page))) { |
| - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + lru_deactivate_file_pvecs); |
| |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); |
| - put_cpu_var(lru_deactivate_file_pvecs); |
| + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); |
| } |
| } |
| |
| @@ -646,19 +652,20 @@ void deactivate_file_page(struct page *page) |
| void deactivate_page(struct page *page) |
| { |
| if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { |
| - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); |
| + struct pagevec *pvec = &get_locked_var(swapvec_lock, |
| + lru_deactivate_pvecs); |
| |
| get_page(page); |
| if (!pagevec_add(pvec, page) || PageCompound(page)) |
| pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); |
| - put_cpu_var(lru_deactivate_pvecs); |
| + put_locked_var(swapvec_lock, lru_deactivate_pvecs); |
| } |
| } |
| |
| void lru_add_drain(void) |
| { |
| - lru_add_drain_cpu(get_cpu()); |
| - put_cpu(); |
| + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); |
| + local_unlock_cpu(swapvec_lock); |
| } |
| |
| static void lru_add_drain_per_cpu(struct work_struct *dummy) |
| -- |
| 2.5.0 |
| |