shmem: add page to active list on swapless system

On a swapless system, there is no point in putting shmem pages on
the inactive anonymous LRU list because we cannot reclaim them
at all.

Meanwhile, MADV_FREE uses the inactive anonymous LRU list to
quarantine hinted pages and sweep them out under memory pressure,
so shmem pages are just noise if they stay on the inactive
anonymous LRU.

This patch adds shmem pages to the active anonymous LRU list
unless there is swap space.
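
For illustration only, the call-site decision amounts to the sketch
below; shmem_lru_add() is a hypothetical helper that is not part of
this patch, while total_swap_pages is the existing global count of
swap pages:

	/*
	 * Sketch: with no swap configured, total_swap_pages is zero,
	 * so the page goes straight to the active anonymous LRU
	 * instead of lingering on the unreclaimable inactive one.
	 */
	static inline void shmem_lru_add(struct page *page)
	{
		lru_cache_add_anon(page, !total_swap_pages);
	}

Pages passed with active == true get PG_active set before being
batched onto the LRU by __lru_cache_add().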

Signed-off-by: Minchan Kim <minchan@kernel.org>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8e944c0..db0cadc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -297,7 +297,7 @@
 
 /* linux/mm/swap.c */
 extern void lru_cache_add(struct page *);
-extern void lru_cache_add_anon(struct page *page);
+extern void lru_cache_add_anon(struct page *page, bool active);
 extern void lru_cache_add_file(struct page *page);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			 struct lruvec *lruvec, struct list_head *head);
diff --git a/mm/shmem.c b/mm/shmem.c
index 48ce829..1ec2e4e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1024,7 +1024,7 @@
 		oldpage = newpage;
 	} else {
 		mem_cgroup_migrate(oldpage, newpage, true);
-		lru_cache_add_anon(newpage);
+		lru_cache_add_anon(newpage, !total_swap_pages);
 		*pagep = newpage;
 	}
 
@@ -1206,7 +1206,7 @@
 			goto decused;
 		}
 		mem_cgroup_commit_charge(page, memcg, false);
-		lru_cache_add_anon(page);
+		lru_cache_add_anon(page, !total_swap_pages);
 
 		spin_lock(&info->lock);
 		info->alloced++;
diff --git a/mm/swap.c b/mm/swap.c
index a2f2cd4..0e2ff2a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -642,11 +642,17 @@
 /**
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
+ * @active: if true, add the page to the active anonymous LRU list
  */
-void lru_cache_add_anon(struct page *page)
+void lru_cache_add_anon(struct page *page, bool active)
 {
-	if (PageActive(page))
-		ClearPageActive(page);
+	if (active) {
+		if (!PageActive(page))
+			SetPageActive(page);
+	} else {
+		if (PageActive(page))
+			ClearPageActive(page);
+	}
 	__lru_cache_add(page);
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 10f63ed..3413521 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -360,7 +360,7 @@
 			/*
 			 * Initiate read into locked page and return.
 			 */
-			lru_cache_add_anon(new_page);
+			lru_cache_add_anon(new_page, false);
 			*new_page_allocated = true;
 			return new_page;
 		}