From: Usama Arif <usamaarif642@gmail.com>
Subject: mm: add sysfs entry to disable splitting underutilized THPs
Date: Tue, 13 Aug 2024 13:02:49 +0100
If the new thp_low_util_shrinker sysfs knob is disabled, THPs faulted in
or collapsed will not be added to _deferred_list, and therefore won't be
considered for splitting under memory pressure if underutilized.
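
As an illustration (not part of the patch): the attribute is registered in
hugepage_attr[], which lives under /sys/kernel/mm/transparent_hugepage, so
toggling the knob should look roughly like:

  # stop adding fully-mapped THPs to _deferred_list
  echo 0 > /sys/kernel/mm/transparent_hugepage/thp_low_util_shrinker

  # default: let the shrinker consider underutilized THPs for splitting
  echo 1 > /sys/kernel/mm/transparent_hugepage/thp_low_util_shrinker

Partially-mapped THPs are still queued regardless of the setting; only the
!partially_mapped case is skipped (see the deferred_split_folio() hunk).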
Link: https://lkml.kernel.org/r/20240813120328.1275952-7-usamaarif642@gmail.com
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
Cc: Alexander Zhu <alexlzhu@fb.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Shuang Zhai <zhais@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Huan Yang <link@vivo.com>
Cc: Kairui Song <ryncsn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
mm/huge_memory.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
--- a/mm/huge_memory.c~mm-add-sysfs-entry-to-disable-splitting-underutilized-thps
+++ a/mm/huge_memory.c
@@ -74,6 +74,7 @@ static unsigned long deferred_split_coun
struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc);
+static bool split_underutilized_thp = true;
static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
@@ -439,6 +440,27 @@ static ssize_t hpage_pmd_size_show(struc
static struct kobj_attribute hpage_pmd_size_attr =
__ATTR_RO(hpage_pmd_size);
+static ssize_t split_underutilized_thp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", split_underutilized_thp);
+}
+
+static ssize_t split_underutilized_thp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = kstrtobool(buf, &split_underutilized_thp);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static struct kobj_attribute split_underutilized_thp_attr = __ATTR(
+ thp_low_util_shrinker, 0644, split_underutilized_thp_show, split_underutilized_thp_store);
+
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
@@ -447,6 +469,7 @@ static struct attribute *hugepage_attr[]
#ifdef CONFIG_SHMEM
&shmem_enabled_attr.attr,
#endif
+ &split_underutilized_thp_attr.attr,
NULL,
};
@@ -3469,6 +3492,9 @@ void deferred_split_folio(struct folio *
if (folio_order(folio) <= 1)
return;
+ if (!partially_mapped && !split_underutilized_thp)
+ return;
+
/*
* The try_to_unmap() in page reclaim path might reach here too,
* this may cause a race condition to corrupt deferred split queue.
_