| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Subject: mm/memcontrol: Replace local_irq_disable with local locks |
| Date: Wed, 28 Jan 2015 17:14:16 +0100 |
| |
| There are a few local_irq_disable() which then take sleeping locks. This |
| patch converts them to local locks. |
| |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| mm/memcontrol.c | 20 ++++++++++++++------ |
| 1 file changed, 14 insertions(+), 6 deletions(-) |
| |
| --- a/mm/memcontrol.c |
| +++ b/mm/memcontrol.c |
| @@ -67,6 +67,7 @@ |
| #include <net/sock.h> |
| #include <net/ip.h> |
| #include "slab.h" |
| +#include <linux/locallock.h> |
| |
| #include <asm/uaccess.h> |
| |
| @@ -92,6 +93,8 @@ int do_swap_account __read_mostly; |
| #define do_swap_account 0 |
| #endif |
| |
| +static DEFINE_LOCAL_IRQ_LOCK(event_lock); |
| + |
| /* Whether legacy memory+swap accounting is active */ |
| static bool do_memsw_account(void) |
| { |
| @@ -4575,12 +4578,12 @@ static int mem_cgroup_move_account(struc |
| |
| ret = 0; |
| |
| - local_irq_disable(); |
| + local_lock_irq(event_lock); |
| mem_cgroup_charge_statistics(to, page, compound, nr_pages); |
| memcg_check_events(to, page); |
| mem_cgroup_charge_statistics(from, page, compound, -nr_pages); |
| memcg_check_events(from, page); |
| - local_irq_enable(); |
| + local_unlock_irq(event_lock); |
| out_unlock: |
| unlock_page(page); |
| out: |
| @@ -5453,10 +5456,10 @@ void mem_cgroup_commit_charge(struct pag |
| |
| commit_charge(page, memcg, lrucare); |
| |
| - local_irq_disable(); |
| + local_lock_irq(event_lock); |
| mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); |
| memcg_check_events(memcg, page); |
| - local_irq_enable(); |
| + local_unlock_irq(event_lock); |
| |
| if (do_memsw_account() && PageSwapCache(page)) { |
| swp_entry_t entry = { .val = page_private(page) }; |
| @@ -5512,14 +5515,14 @@ static void uncharge_batch(struct mem_cg |
| memcg_oom_recover(memcg); |
| } |
| |
| - local_irq_save(flags); |
| + local_lock_irqsave(event_lock, flags); |
| __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); |
| __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); |
| __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); |
| __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); |
| __this_cpu_add(memcg->stat->nr_page_events, nr_pages); |
| memcg_check_events(memcg, dummy_page); |
| - local_irq_restore(flags); |
| + local_unlock_irqrestore(event_lock, flags); |
| |
| if (!mem_cgroup_is_root(memcg)) |
| css_put_many(&memcg->css, nr_pages); |
| @@ -5854,6 +5857,7 @@ void mem_cgroup_swapout(struct page *pag |
| { |
| struct mem_cgroup *memcg, *swap_memcg; |
| unsigned short oldid; |
| + unsigned long flags; |
| |
| VM_BUG_ON_PAGE(PageLRU(page), page); |
| VM_BUG_ON_PAGE(page_count(page), page); |
| @@ -5894,12 +5898,16 @@ void mem_cgroup_swapout(struct page *pag |
| * important here to have the interrupts disabled because it is the |
| * only synchronisation we have for udpating the per-CPU variables. |
| */ |
| + local_lock_irqsave(event_lock, flags); |
| +#ifndef CONFIG_PREEMPT_RT_BASE |
| VM_BUG_ON(!irqs_disabled()); |
| +#endif |
| mem_cgroup_charge_statistics(memcg, page, false, -1); |
| memcg_check_events(memcg, page); |
| |
| if (!mem_cgroup_is_root(memcg)) |
| css_put(&memcg->css); |
| + local_unlock_irqrestore(event_lock, flags); |
| } |
| |
| /* |