mm: add promotion migration support

Provide a migration API to promote pages to their next promotion node,
mirroring the existing demotion path, and add vmstat counters and an
MR_PROMOTION migrate reason for the event.  Demotion and promotion now
share a common migrate_mapping() helper.
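
A caller is expected to hand migrate_promote_mapping() a locked,
isolated, non-huge page, the same contract as migrate_demote_mapping().
For example (an illustrative sketch only; try_promote_page() and its
reclaim-side caller are hypothetical and not part of this patch):

	/* page must be locked, isolated from the LRU, and not huge */
	static int try_promote_page(struct page *page)
	{
		int rc = migrate_promote_mapping(page);

		/* -ENOSYS without CONFIG_MIGRATION; other -errno on failure */
		return rc == MIGRATEPAGE_SUCCESS ? 0 : rc;
	}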

Signed-off-by: Keith Busch <keith.busch@intel.com>
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9f957c4..36c9b3d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -26,12 +26,14 @@
 	MR_NUMA_MISPLACED,
 	MR_CONTIG_RANGE,
 	MR_DEMOTION,
+	MR_PROMOTION,
 	MR_TYPES
 };
 
 enum migrate_hmem_reason {
 	MR_HMEM_UNKNOWN,
 	MR_HMEM_RECLAIM_DEMOTE,
+	MR_HMEM_RECLAIM_PROMOTE,
 	MR_HMEM_NR_REASONS
 };
 
@@ -93,6 +95,7 @@
 		struct page *newpage, struct page *page, enum migrate_mode mode,
 		int extra_count);
 extern int migrate_demote_mapping(struct page *page);
+extern int migrate_promote_mapping(struct page *page);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
@@ -123,6 +126,12 @@
 {
 	return -ENOSYS;
 }
+
+static inline int migrate_promote_mapping(struct page *page)
+{
+	return -ENOSYS;
+}
+
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_COMPACTION
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54489a4..d0b6ece 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -210,6 +210,7 @@
 	HMEM_MIGRATE_UNKNOWN,
 	HMEM_MIGRATE_FIRST_ENTRY = HMEM_MIGRATE_UNKNOWN,
 	HMEM_MIGRATE(MR_HMEM_RECLAIM_DEMOTE),
+	HMEM_MIGRATE(MR_HMEM_RECLAIM_PROMOTE),
 	NR_VM_ZONE_STAT_ITEMS
 };
 
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index a050d7b..e864e10 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -22,7 +22,8 @@
 	EM( MR_MEMPOLICY_MBIND,	"mempolicy_mbind")		\
 	EM( MR_NUMA_MISPLACED,	"numa_misplaced")		\
 	EM( MR_CONTIG_RANGE,	"contig_range")			\
-	EMe(MR_DEMOTION,	"demotion")
+	EM( MR_DEMOTION,	"demotion")			\
+	EMe(MR_PROMOTION,	"promotion")
 
 /*
  * First define the enums in the above macros to be exported to userspace
diff --git a/mm/debug.c b/mm/debug.c
index 0519e57..b9a1809 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -26,6 +26,7 @@
 	"numa_misplaced",
 	"cma",
 	"demotion",
+	"promotion",
 };
 
 const struct trace_print_flags pageflag_names[] = {
diff --git a/mm/migrate.c b/mm/migrate.c
index 419baae..b31eba8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1121,7 +1121,7 @@
 	return rc;
 }
 
-static struct page *alloc_demote_node_page(struct page *page, unsigned long node)
+static struct page *alloc_node_page(struct page *page, unsigned long node)
 {
 	/*
 	 * The flags are set to allocate only on the desired node in the
@@ -1144,20 +1144,14 @@
 	return newpage;
 }
 
-/**
- * migrate_demote_mapping() - Migrate this page and its mappings to its
- *                            demotion node.
- * @page: A locked, isolated, non-huge page that should migrate to its current
- *        node's demotion target, if available. Since this is intended to be
- *        called during memory reclaim, all flag options are set to fail fast.
- *
- * @returns: MIGRATEPAGE_SUCCESS if successful, -errno otherwise.
- */
-int migrate_demote_mapping(struct page *page)
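+/*
+ * Common helper for demotion and promotion.  Since this is intended to
+ * be called during memory reclaim, all flag options are set to fail
+ * fast.
+ */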
+static int migrate_mapping(struct page *page, int next_nid,
+			   struct migrate_detail *m_detail)
 {
-	int next_nid = next_migration_node(page_to_nid(page));
-	struct migrate_detail m_detail = {};
-
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageHuge(page), page);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -1167,13 +1161,52 @@
 	if (PageTransHuge(page) && !thp_migration_supported())
 		return -ENOMEM;
 
+	if (m_detail->reason == MR_PROMOTION)
+		pr_info_once("promote page from %d to %d\n",
+			     page_to_nid(page), next_nid);
+
 	/* MIGRATE_ASYNC is the most light weight and never blocks.*/
-	m_detail.reason = MR_DEMOTION;
-	m_detail.h_reason = MR_HMEM_RECLAIM_DEMOTE;
-	return __unmap_and_move(alloc_demote_node_page, NULL, next_nid,
-				page, MIGRATE_ASYNC, &m_detail);
+	return __unmap_and_move(alloc_node_page, NULL, next_nid,
+				page, MIGRATE_ASYNC, m_detail);
 }
 
+/**
+ * migrate_demote_mapping() - Migrate this page and its mappings to its
+ *                            demotion node.
+ * @page: A locked, isolated, non-huge page that should migrate to its current
+ *        node's demotion target, if available.
+ *
+ * @returns: MIGRATEPAGE_SUCCESS if successful, -errno otherwise.
+ */
+int migrate_demote_mapping(struct page *page)
+{
+	struct migrate_detail m_detail = {
+		.reason = MR_DEMOTION,
+		.h_reason = MR_HMEM_RECLAIM_DEMOTE,
+	};
+
+	return migrate_mapping(page, next_migration_node(page_to_nid(page)),
+			       &m_detail);
+}
+
+/**
+ * migrate_promote_mapping() - Migrate this page and its mappings to its
+ *                             promotion node.
+ * @page: A locked, isolated, non-huge page that should migrate to its current
+ *        node's promotion target, if available.
+ *
+ * @returns: MIGRATEPAGE_SUCCESS if successful, -errno otherwise.
+ */
+int migrate_promote_mapping(struct page *page)
+{
+	struct migrate_detail m_detail = {
+		.reason = MR_PROMOTION,
+		.h_reason = MR_HMEM_RECLAIM_PROMOTE,
+	};
+
+	return migrate_mapping(page, next_promotion_node(page_to_nid(page)),
+			       &m_detail);
+}
 
 /*
  * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7587c3a..cc8e875 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1153,6 +1153,8 @@
 	"hmem_unknown",
 	"hmem_reclaim_demote_src",
 	"hmem_reclaim_demote_dst",
+	"hmem_reclaim_promote_src",
+	"hmem_reclaim_promote_dst",
 
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA