From a0e106b941dd55f64386766669bc59fa4037f453 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH] mm/swap: Convert to percpu locked

Replace the open-coded get_cpu()/local_irq_save() protection of the per-CPU
pagevecs with local_locks(). Two locks are introduced: "rotate_lock" for the
rotate pagevec and "swapvec_lock" for the other swap pagevecs.
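
The before/after pattern, shown on the rotate pagevec path (an
illustrative sketch only; the real hunks follow below):

	/* before: per-CPU state protected by disabling interrupts */
	local_irq_save(flags);
	pvec = this_cpu_ptr(&lru_rotate_pvecs);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		pagevec_move_tail(pvec);
	local_irq_restore(flags);

	/* after: the same critical section under a named local lock;
	 * on non-RT this still disables interrupts, while RT can
	 * substitute a per-CPU sleeping lock
	 */
	static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);

	local_lock_irqsave(rotate_lock, flags);
	pvec = this_cpu_ptr(&lru_rotate_pvecs);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		pagevec_move_tail(pvec);
	local_unlock_irqrestore(rotate_lock, flags);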

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bcdefa817ed5..65de54b88964 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -269,6 +269,7 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 
 /* linux/mm/swap.c */
+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
 extern void lru_cache_add(struct page *);
 extern void lru_cache_add_anon(struct page *page);
 extern void lru_cache_add_file(struct page *page);
diff --git a/mm/compaction.c b/mm/compaction.c
index 09c5282ebdd2..caab6ed7f2b5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1601,10 +1601,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 				block_start_pfn(cc->migrate_pfn, cc->order);
 
 			if (cc->last_migrated_pfn < current_block_start) {
-				cpu = get_cpu();
+				cpu = get_cpu_light();
+				local_lock_irq(swapvec_lock);
 				lru_add_drain_cpu(cpu);
+				local_unlock_irq(swapvec_lock);
 				drain_local_pages(zone);
-				put_cpu();
+				put_cpu_light();
 				/* No more flushing until we migrate again */
 				cc->last_migrated_pfn = 0;
 			}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eb8105747829..045a41ab88ce 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6747,8 +6747,9 @@ void __init free_area_init(unsigned long *zones_size)
 
 static int page_alloc_cpu_dead(unsigned int cpu)
 {
-
+	local_lock_irq_on(swapvec_lock, cpu);
 	lru_add_drain_cpu(cpu);
+	local_unlock_irq_on(swapvec_lock, cpu);
 	drain_pages(cpu);
 
 	/*
diff --git a/mm/swap.c b/mm/swap.c
index 98d08b4579fa..e81fddc352f9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,6 +32,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 
@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -252,11 +255,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		get_page(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -306,12 +309,13 @@ void activate_page(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+				activate_page_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
@@ -338,7 +342,7 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;
 
 	/*
@@ -360,7 +364,7 @@ static void __lru_cache_activate_page(struct page *page)
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -402,12 +406,12 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
 	get_page(page);
 	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -612,9 +616,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -646,11 +650,12 @@ void deactivate_file_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+				lru_deactivate_file_pvecs);
 
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-		put_cpu_var(lru_deactivate_file_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
 	}
 }
 
@@ -665,19 +670,20 @@ void mark_page_lazyfree(struct page *page)
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+				lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
-		put_cpu_var(lru_lazyfree_pvecs);
+		put_locked_var(swapvec_lock, lru_lazyfree_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
-- 
2.1.4