From: Kinsey Ho <kinseyho@google.com>
Subject: mm/mglru: remove CONFIG_MEMCG
Date: Wed, 27 Dec 2023 14:12:03 +0000

Remove CONFIG_MEMCG in a refactoring to improve code readability at
the cost of a few bytes in struct lru_gen_folio per node when
CONFIG_MEMCG=n.

Link: https://lkml.kernel.org/r/20231227141205.2200125-4-kinseyho@google.com
Signed-off-by: Kinsey Ho <kinseyho@google.com>
Co-developed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tested-by: Donet Tom <donettom@linux.vnet.ibm.com>
Acked-by: Yu Zhao <yuzhao@google.com>
Cc: kernel test robot <lkp@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

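The "few bytes" above are the three fields that used to sit behind
CONFIG_MEMCG in struct lru_gen_folio: gen, seg and the hlist_nulls_node
used for global reclaim (see the mmzone.h hunk below).  A rough userspace
sketch of that per-node footprint follows; it is only an illustration,
using a two-pointer stand-in for struct hlist_nulls_node, and the exact
numbers depend on the architecture and on padding inside the real struct:

#include <stdio.h>

/* stand-in for struct hlist_nulls_node: two pointers, like the kernel type */
struct hlist_nulls_node_standin {
	void *next;
	void **pprev;
};

/* the fields that are now compiled into struct lru_gen_folio unconditionally */
struct memcg_lru_fields {
	unsigned char gen;			/* memcg generation */
	unsigned char seg;			/* list segment */
	struct hlist_nulls_node_standin list;	/* per-node list for global reclaim */
};

int main(void)
{
	/*
	 * Typically prints 24 on an LP64 build; the real growth of struct
	 * lru_gen_folio is smaller when gen and seg fit into padding that
	 * already follows the 'enabled' member.
	 */
	printf("approximate extra bytes per node: %zu\n",
	       sizeof(struct memcg_lru_fields));
	return 0;
}
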
 include/linux/mm_types.h |  4 --
 include/linux/mmzone.h   | 26 +-------------
 mm/vmscan.c              | 67 +++++++++++-----------------------
 3 files changed, 23 insertions(+), 74 deletions(-)

--- a/include/linux/mm_types.h~mm-mglru-remove-config_memcg
+++ a/include/linux/mm_types.h
@@ -1017,9 +1017,7 @@ struct lru_gen_mm_list {
 
 void lru_gen_add_mm(struct mm_struct *mm);
 void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
 void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
 
 static inline void lru_gen_init_mm(struct mm_struct *mm)
 {
@@ -1050,11 +1048,9 @@ static inline void lru_gen_del_mm(struct
 {
 }
 
-#ifdef CONFIG_MEMCG
 static inline void lru_gen_migrate_mm(struct mm_struct *mm)
 {
 }
-#endif
 
 static inline void lru_gen_init_mm(struct mm_struct *mm)
 {
--- a/include/linux/mmzone.h~mm-mglru-remove-config_memcg
+++ a/include/linux/mmzone.h
@@ -440,14 +440,12 @@ struct lru_gen_folio {
 	atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
 	/* whether the multi-gen LRU is enabled */
 	bool enabled;
-#ifdef CONFIG_MEMCG
 	/* the memcg generation this lru_gen_folio belongs to */
 	u8 gen;
 	/* the list segment this lru_gen_folio belongs to */
 	u8 seg;
 	/* per-node lru_gen_folio list for global reclaim */
 	struct hlist_nulls_node list;
-#endif
 };
 
 enum {
@@ -493,11 +491,6 @@ struct lru_gen_mm_walk {
 	bool force_scan;
 };
 
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
 /*
  * For each node, memcgs are divided into two generations: the old and the
  * young. For each generation, memcgs are randomly sharded into multiple bins
@@ -555,6 +548,8 @@ struct lru_gen_memcg {
 };
 
 void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
 
 void lru_gen_init_memcg(struct mem_cgroup *memcg);
 void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -563,19 +558,6 @@ void lru_gen_offline_memcg(struct mem_cg
 void lru_gen_release_memcg(struct mem_cgroup *memcg);
 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
 
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS 1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
-
 #else /* !CONFIG_LRU_GEN */
 
 static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
@@ -590,8 +572,6 @@ static inline void lru_gen_look_around(s
 {
 }
 
-#ifdef CONFIG_MEMCG
-
 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
 }
@@ -616,8 +596,6 @@ static inline void lru_gen_soft_reclaim(
 {
 }
 
-#endif /* CONFIG_MEMCG */
-
 #endif /* CONFIG_LRU_GEN */
 
 struct lruvec {
--- a/mm/vmscan.c~mm-mglru-remove-config_memcg
+++ a/mm/vmscan.c
@@ -4097,13 +4097,6 @@ enum {
 	MEMCG_LRU_YOUNG,
 };
 
-#ifdef CONFIG_MEMCG
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
-	return READ_ONCE(lruvec->lrugen.seg);
-}
-
 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
 {
 	int seg;
@@ -4150,6 +4143,8 @@ static void lru_gen_rotate_memcg(struct
 	spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
 }
 
+#ifdef CONFIG_MEMCG
+
 void lru_gen_online_memcg(struct mem_cgroup *memcg)
 {
 	int gen;
@@ -4217,18 +4212,11 @@ void lru_gen_soft_reclaim(struct mem_cgr
 	struct lruvec *lruvec = get_lruvec(memcg, nid);
 
 	/* see the comment on MEMCG_NR_GENS */
-	if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
+	if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
 		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
 }
 
-#else /* !CONFIG_MEMCG */
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
-	return 0;
-}
-
-#endif
+#endif /* CONFIG_MEMCG */
 
 /******************************************************************************
  * the eviction
@@ -4776,7 +4764,7 @@ static int shrink_one(struct lruvec *lru
 
 	if (mem_cgroup_below_low(NULL, memcg)) {
 		/* see the comment on MEMCG_NR_GENS */
-		if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
+		if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
 			return MEMCG_LRU_TAIL;
 
 		memcg_memory_event(memcg, MEMCG_LOW);
@@ -4799,12 +4787,10 @@ static int shrink_one(struct lruvec *lru
 		return 0;
 
 	/* one retry if offlined or too small */
-	return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+	return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
 	       MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
 }
 
-#ifdef CONFIG_MEMCG
-
 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	int op;
@@ -4896,20 +4882,6 @@ static void lru_gen_shrink_lruvec(struct
 	blk_finish_plug(&plug);
 }
 
-#else /* !CONFIG_MEMCG */
-
-static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
-{
-	BUILD_BUG();
-}
-
-static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-{
-	BUILD_BUG();
-}
-
-#endif
-
 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	int priority;
@@ -5560,6 +5532,18 @@ static const struct file_operations lru_
  * initialization
  ******************************************************************************/
 
+void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+	int i, j;
+
+	spin_lock_init(&pgdat->memcg_lru.lock);
+
+	for (i = 0; i < MEMCG_NR_GENS; i++) {
+		for (j = 0; j < MEMCG_NR_BINS; j++)
+			INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
+	}
+}
+
 void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
 	int i;
@@ -5582,18 +5566,6 @@ void lru_gen_init_lruvec(struct lruvec *
 
 #ifdef CONFIG_MEMCG
 
-void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-	int i, j;
-
-	spin_lock_init(&pgdat->memcg_lru.lock);
-
-	for (i = 0; i < MEMCG_NR_GENS; i++) {
-		for (j = 0; j < MEMCG_NR_BINS; j++)
-			INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
-	}
-}
-
 void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
@@ -5653,14 +5625,17 @@ late_initcall(init_lru_gen);
 
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 #endif /* CONFIG_LRU_GEN */
_