cgroups: fixup refill code after mainline change
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
deleted file mode 100644
index a61c501..0000000
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Sat, 21 Jun 2014 10:09:48 +0200
-Subject: memcontrol: Prevent scheduling while atomic in cgroup code
-
-mm, memcg: make refill_stock() use get_cpu_light()
-
-Nikita reported the following memcg scheduling while atomic bug:
-
-Call Trace:
-[e22d5a90] [c0007ea8] show_stack+0x4c/0x168 (unreliable)
-[e22d5ad0] [c0618c04] __schedule_bug+0x94/0xb0
-[e22d5ae0] [c060b9ec] __schedule+0x530/0x550
-[e22d5bf0] [c060bacc] schedule+0x30/0xbc
-[e22d5c00] [c060ca24] rt_spin_lock_slowlock+0x180/0x27c
-[e22d5c70] [c00b39dc] res_counter_uncharge_until+0x40/0xc4
-[e22d5ca0] [c013ca88] drain_stock.isra.20+0x54/0x98
-[e22d5cc0] [c01402ac] __mem_cgroup_try_charge+0x2e8/0xbac
-[e22d5d70] [c01410d4] mem_cgroup_charge_common+0x3c/0x70
-[e22d5d90] [c0117284] __do_fault+0x38c/0x510
-[e22d5df0] [c011a5f4] handle_pte_fault+0x98/0x858
-[e22d5e50] [c060ed08] do_page_fault+0x42c/0x6fc
-[e22d5f40] [c000f5b4] handle_page_fault+0xc/0x80
-
-What happens:
-
-   refill_stock()
-      get_cpu_var()
-      drain_stock()
-         res_counter_uncharge()
-            res_counter_uncharge_until()
-               spin_lock() <== boom
-
-Fix it by replacing get/put_cpu_var() with get/put_cpu_light().
-
-
-Reported-by: Nikita Yushchenko <nyushchenko@dev.rtsoft.ru>
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/memcontrol.c |    7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
---- a/mm/memcontrol.c
-+++ b/mm/memcontrol.c
-@@ -1828,14 +1828,17 @@ static void drain_local_stock(struct wor
-  */
- static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- {
--	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
-+	struct memcg_stock_pcp *stock;
-+	int cpu = get_cpu_light();
-+
-+	stock = &per_cpu(memcg_stock, cpu);
- 
- 	if (stock->cached != memcg) { /* reset if necessary */
- 		drain_stock(stock);
- 		stock->cached = memcg;
- 	}
- 	stock->nr_pages += nr_pages;
--	put_cpu_var(memcg_stock);
-+	put_cpu_light();
- }
- 
- /*
diff --git a/patches/mm-memcontrol-consume_stock-replace-local_irq_disabl.patch b/patches/mm-memcontrol-consume_stock-replace-local_irq_disabl.patch
index 1273f9e..bb7f806 100644
--- a/patches/mm-memcontrol-consume_stock-replace-local_irq_disabl.patch
+++ b/patches/mm-memcontrol-consume_stock-replace-local_irq_disabl.patch
@@ -1,21 +1,26 @@
-From 5ded5e3e460cb78009447bb6f7f8d41b45eceb50 Mon Sep 17 00:00:00 2001
+From f38aa5f349ab8e78471d1f8ea0d04821af67e741 Mon Sep 17 00:00:00 2001
 From: Paul Gortmaker <paul.gortmaker@windriver.com>
 Date: Sun, 5 Jun 2016 08:11:13 +0200
-Subject: [PATCH] mm/memcontrol: consume_stock() - replace local_irq_disable()
- w. local_lock_irq()
+Subject: [PATCH] mm/memcontrol: consume/refill stock - local_irq_disable -->
+ local_lock_irq
 
-v4.8 grew a local_irq_disable() in mm/memcontrol.c::consume_stock().
-Convert it to use the existing local lock (event_lock) like the others.
+v4.8 grew more local_irq_disable() in mm/memcontrol.c
+
+Convert them to use the existing local_lock(event_lock) like the others.
 
 Based on similar patch by Mike Galbraith <umgwanakikbuti@gmail.com>
 
 Upstream commit is db2ba40c277dc ("mm: memcontrol: make per-cpu charge
 cache IRQ-safe for socket accounting")
 
+This also replaces the old -rt patch:
+    cgroups-scheduling-while-atomic-in-cgroup-code.patch
+that used get_cpu_light() in the refill path.
+
 Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
 
 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 462c0feb8da2..8dd04ba7e7ca 100644
+index 462c0feb8da2..fe590c345f99 100644
 --- a/mm/memcontrol.c
 +++ b/mm/memcontrol.c
 @@ -1749,7 +1749,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -36,6 +41,24 @@
  
  	return ret;
  }
+@@ -1802,7 +1802,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 	struct memcg_stock_pcp *stock;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(event_lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (stock->cached != memcg) { /* reset if necessary */
+@@ -1811,7 +1811,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 	}
+ 	stock->nr_pages += nr_pages;
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(event_lock, flags);
+ }
+ 
+ /*
 -- 
 2.10.0
 
diff --git a/patches/series b/patches/series
index c039b60..7e04717 100644
--- a/patches/series
+++ b/patches/series
@@ -579,7 +579,8 @@
 
 # CGROUPS
 cgroups-use-simple-wait-in-css_release.patch
-cgroups-scheduling-while-atomic-in-cgroup-code.patch
-# n/a after mainline commit db2ba40c277dc -- refill_stock() locking is fixed by the MM patch above
+#cgroups-scheduling-while-atomic-in-cgroup-code.patch
 
 # New stuff
 # Revisit: We need this in other places as well