From 61933a6bdac50e528426369a1180243e5d112b79 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jan 2015 17:14:16 +0100
Subject: [PATCH] mm/memcontrol: Replace local_irq_disable with local locks
There are a few local_irq_disable() sections which then go on to take
sleeping locks; this is not allowed on PREEMPT_RT. This patch converts
them to local locks.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
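For reference, the conversion pattern is roughly the following. This is a
minimal sketch of the <linux/locallock.h> API as this patch uses it;
example_lock and update_example_stats are illustrative names, not anything
from memcontrol.c:

#include <linux/locallock.h>

/* Per-CPU lock object backing the critical sections below. */
static DEFINE_LOCAL_IRQ_LOCK(example_lock);

static void update_example_stats(void)
{
	unsigned long flags;

	/*
	 * On !PREEMPT_RT_BASE this compiles down to local_irq_save(),
	 * i.e. the old behaviour is kept.  On PREEMPT_RT it instead
	 * acquires a per-CPU sleeping spinlock, so the critical section
	 * stays preemptible and may itself take sleeping locks.
	 */
	local_lock_irqsave(example_lock, flags);
	/* ... update the per-CPU counters ... */
	local_unlock_irqrestore(example_lock, flags);
}

This is also why the patch wraps VM_BUG_ON(!irqs_disabled()) in
#ifndef CONFIG_PREEMPT_RT_BASE below: on RT the local lock does not
disable interrupts, so the assertion would trigger.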
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 135a10f2b801..eaf0be517d6f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -67,6 +67,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
+#include <linux/locallock.h>
 #include <asm/uaccess.h>
@@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
 #define do_swap_account		0
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
+
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
 {
@@ -4571,12 +4574,12 @@ static int mem_cgroup_move_account(struct page *page,
 	ret = 0;
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
 	memcg_check_events(from, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 out_unlock:
 	unlock_page(page);
 out:
 	return ret;
@@ -5449,10 +5452,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 	commit_charge(page, memcg, lrucare);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
 	memcg_check_events(memcg, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 
 	if (do_memsw_account() && PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
@@ -5508,14 +5511,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 		memcg_oom_recover(memcg);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 
 	if (!mem_cgroup_is_root(memcg))
 		css_put_many(&memcg->css, nr_pages);
@@ -5832,6 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned short oldid;
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -5872,12 +5876,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
+	local_lock_irqsave(event_lock, flags);
+#ifndef CONFIG_PREEMPT_RT_BASE
 	VM_BUG_ON(!irqs_disabled());
+#endif
 	mem_cgroup_charge_statistics(memcg, page, false, -1);
 	memcg_check_events(memcg, page);
 
 	if (!mem_cgroup_is_root(memcg))
 		css_put(&memcg->css);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 /*
--
2.5.0