swap: refresh context for new unevictable condition

Upstream, the mark_page_lazyfree() guard in mm/swap.c gained a
!PageSwapCache(page) check alongside the existing !PageUnevictable(page)
test, so the context lines of mm-convert-swap-to-percpu-locked.patch no
longer matched.  Refresh the patch context to the new condition; the
remaining changes are line-offset/context refreshes in mm/compaction.c
and mm/page_alloc.c with no functional change to the patch itself.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 547891b..51dc6b7 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -1,4 +1,4 @@
-From a0e106b941dd55f64386766669bc59fa4037f453 Mon Sep 17 00:00:00 2001
+From 9ef6d64566b8b750c81f63c1e2472496d367e01b Mon Sep 17 00:00:00 2001
 From: Ingo Molnar <mingo@elte.hu>
 Date: Fri, 3 Jul 2009 08:29:51 -0500
 Subject: [PATCH] mm/swap: Convert to percpu locked
@@ -22,10 +22,10 @@
  extern void lru_cache_add_anon(struct page *page);
  extern void lru_cache_add_file(struct page *page);
 diff --git a/mm/compaction.c b/mm/compaction.c
-index 09c5282ebdd2..caab6ed7f2b5 100644
+index 613c59e928cb..8a5a2541b47c 100644
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
-@@ -1601,10 +1601,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
+@@ -1634,10 +1634,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
  				block_start_pfn(cc->migrate_pfn, cc->order);
  
  			if (cc->last_migrated_pfn < current_block_start) {
@@ -41,10 +41,10 @@
  				cc->last_migrated_pfn = 0;
  			}
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index eb8105747829..045a41ab88ce 100644
+index 0cb4469aac98..d9e7b90aad9c 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -6747,8 +6747,9 @@ void __init free_area_init(unsigned long *zones_size)
+@@ -6845,8 +6845,9 @@ void __init free_area_init(unsigned long *zones_size)
  
  static int page_alloc_cpu_dead(unsigned int cpu)
  {
@@ -56,7 +56,7 @@
  
  	/*
 diff --git a/mm/swap.c b/mm/swap.c
-index 98d08b4579fa..e81fddc352f9 100644
+index 1efa5a132d5c..b1678dcecccb 100644
 --- a/mm/swap.c
 +++ b/mm/swap.c
 @@ -32,6 +32,7 @@
@@ -169,7 +169,7 @@
 @@ -665,19 +670,20 @@ void mark_page_lazyfree(struct page *page)
  {
  	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- 	    !PageUnevictable(page)) {
+ 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 -		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 +		struct pagevec *pvec = &get_locked_var(swapvec_lock,
 +						       lru_lazyfree_pvecs);
@@ -192,5 +192,5 @@
  
  static void lru_add_drain_per_cpu(struct work_struct *dummy)
 -- 
-2.1.4
+2.15.0