| From: Max Kellermann <max.kellermann@ionos.com> |
| Subject: mm: constify zone related test/getter functions |
| Date: Mon, 1 Sep 2025 22:50:12 +0200 |
| |
| For improved const-correctness. |
| |
| We select certain test functions which either invoke each other, |
| functions that are already const-ified, or no further functions. |
| |
| It is therefore relatively trivial to const-ify them, which provides a |
| basis for further const-ification further up the call stack. |
| |
| Link: https://lkml.kernel.org/r/20250901205021.3573313-4-max.kellermann@ionos.com |
| Signed-off-by: Max Kellermann <max.kellermann@ionos.com> |
| Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> |
| Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> |
| Acked-by: David Hildenbrand <david@redhat.com> |
| Acked-by: Vlastimil Babka <vbabka@suse.cz> |
| Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org> |
| Acked-by: Shakeel Butt <shakeel.butt@linux.dev> |
| Cc: Alexander Gordeev <agordeev@linux.ibm.com> |
| Cc: Al Viro <viro@zeniv.linux.org.uk> |
| Cc: Andreas Larsson <andreas@gaisler.com> |
| Cc: Andy Lutomirski <luto@kernel.org> |
| Cc: Axel Rasmussen <axelrasmussen@google.com> |
| Cc: Baolin Wang <baolin.wang@linux.alibaba.com> |
| Cc: Borislav Petkov <bp@alien8.de> |
| Cc: Christian Borntraeger <borntraeger@linux.ibm.com> |
| Cc: Christian Brauner <brauner@kernel.org> |
| Cc: Christian Zankel <chris@zankel.net> |
| Cc: David Rientjes <rientjes@google.com> |
| Cc: David S. Miller <davem@davemloft.net> |
| Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> |
| Cc: Heiko Carstens <hca@linux.ibm.com> |
| Cc: Helge Deller <deller@gmx.de> |
| Cc: "H. Peter Anvin" <hpa@zytor.com> |
| Cc: Hugh Dickins <hughd@google.com> |
| Cc: Ingo Molnar <mingo@redhat.com> |
| Cc: James Bottomley <james.bottomley@HansenPartnership.com> |
| Cc: Jan Kara <jack@suse.cz> |
| Cc: Jocelyn Falempe <jfalempe@redhat.com> |
| Cc: Liam Howlett <liam.howlett@oracle.com> |
| Cc: Mark Brown <broonie@kernel.org> |
| Cc: Matthew Wilcox (Oracle) <willy@infradead.org> |
| Cc: Max Filippov <jcmvbkbc@gmail.com> |
| Cc: Michael Ellerman <mpe@ellerman.id.au> |
| Cc: Michal Hocko <mhocko@suse.com> |
| Cc: "Nysal Jan K.A" <nysal@linux.ibm.com> |
| Cc: Oscar Salvador <osalvador@suse.de> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Cc: Russell King <linux@armlinux.org.uk> |
| Cc: Suren Baghdasaryan <surenb@google.com> |
| Cc: Sven Schnelle <svens@linux.ibm.com> |
| Cc: Thomas Gleixner <tglx@linutronix.de> |
| Cc: Thomas Huth <thuth@redhat.com> |
| Cc: Vasily Gorbik <gor@linux.ibm.com> |
| Cc: Wei Xu <weixugc@google.com> |
| Cc: Yuanchu Xie <yuanchu@google.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/mmzone.h | 42 +++++++++++++++++++-------------------- |
| 1 file changed, 21 insertions(+), 21 deletions(-) |
| |
| --- a/include/linux/mmzone.h~mm-constify-zone-related-test-getter-functions |
| +++ a/include/linux/mmzone.h |
| @@ -1104,7 +1104,7 @@ static inline unsigned long promo_wmark_ |
| return wmark_pages(z, WMARK_PROMO); |
| } |
| |
| -static inline unsigned long zone_managed_pages(struct zone *zone) |
| +static inline unsigned long zone_managed_pages(const struct zone *zone) |
| { |
| return (unsigned long)atomic_long_read(&zone->managed_pages); |
| } |
| @@ -1128,12 +1128,12 @@ static inline bool zone_spans_pfn(const |
| return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); |
| } |
| |
| -static inline bool zone_is_initialized(struct zone *zone) |
| +static inline bool zone_is_initialized(const struct zone *zone) |
| { |
| return zone->initialized; |
| } |
| |
| -static inline bool zone_is_empty(struct zone *zone) |
| +static inline bool zone_is_empty(const struct zone *zone) |
| { |
| return zone->spanned_pages == 0; |
| } |
| @@ -1273,7 +1273,7 @@ static inline bool folio_is_zone_movable |
| * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty |
| * intersection with the given zone |
| */ |
| -static inline bool zone_intersects(struct zone *zone, |
| +static inline bool zone_intersects(const struct zone *zone, |
| unsigned long start_pfn, unsigned long nr_pages) |
| { |
| if (zone_is_empty(zone)) |
| @@ -1581,12 +1581,12 @@ static inline int local_memory_node(int |
| #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) |
| |
| #ifdef CONFIG_ZONE_DEVICE |
| -static inline bool zone_is_zone_device(struct zone *zone) |
| +static inline bool zone_is_zone_device(const struct zone *zone) |
| { |
| return zone_idx(zone) == ZONE_DEVICE; |
| } |
| #else |
| -static inline bool zone_is_zone_device(struct zone *zone) |
| +static inline bool zone_is_zone_device(const struct zone *zone) |
| { |
| return false; |
| } |
| @@ -1598,19 +1598,19 @@ static inline bool zone_is_zone_device(s |
| * populated_zone(). If the whole zone is reserved then we can easily |
| * end up with populated_zone() && !managed_zone(). |
| */ |
| -static inline bool managed_zone(struct zone *zone) |
| +static inline bool managed_zone(const struct zone *zone) |
| { |
| return zone_managed_pages(zone); |
| } |
| |
| /* Returns true if a zone has memory */ |
| -static inline bool populated_zone(struct zone *zone) |
| +static inline bool populated_zone(const struct zone *zone) |
| { |
| return zone->present_pages; |
| } |
| |
| #ifdef CONFIG_NUMA |
| -static inline int zone_to_nid(struct zone *zone) |
| +static inline int zone_to_nid(const struct zone *zone) |
| { |
| return zone->node; |
| } |
| @@ -1620,7 +1620,7 @@ static inline void zone_set_nid(struct z |
| zone->node = nid; |
| } |
| #else |
| -static inline int zone_to_nid(struct zone *zone) |
| +static inline int zone_to_nid(const struct zone *zone) |
| { |
| return 0; |
| } |
| @@ -1647,7 +1647,7 @@ static inline int is_highmem_idx(enum zo |
| * @zone: pointer to struct zone variable |
| * Return: 1 for a highmem zone, 0 otherwise |
| */ |
| -static inline int is_highmem(struct zone *zone) |
| +static inline int is_highmem(const struct zone *zone) |
| { |
| return is_highmem_idx(zone_idx(zone)); |
| } |
| @@ -1713,12 +1713,12 @@ static inline struct zone *zonelist_zone |
| return zoneref->zone; |
| } |
| |
| -static inline int zonelist_zone_idx(struct zoneref *zoneref) |
| +static inline int zonelist_zone_idx(const struct zoneref *zoneref) |
| { |
| return zoneref->zone_idx; |
| } |
| |
| -static inline int zonelist_node_idx(struct zoneref *zoneref) |
| +static inline int zonelist_node_idx(const struct zoneref *zoneref) |
| { |
| return zone_to_nid(zoneref->zone); |
| } |
| @@ -2021,7 +2021,7 @@ static inline struct page *__section_mem |
| return (struct page *)map; |
| } |
| |
| -static inline int present_section(struct mem_section *section) |
| +static inline int present_section(const struct mem_section *section) |
| { |
| return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
| } |
| @@ -2031,12 +2031,12 @@ static inline int present_section_nr(uns |
| return present_section(__nr_to_section(nr)); |
| } |
| |
| -static inline int valid_section(struct mem_section *section) |
| +static inline int valid_section(const struct mem_section *section) |
| { |
| return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
| } |
| |
| -static inline int early_section(struct mem_section *section) |
| +static inline int early_section(const struct mem_section *section) |
| { |
| return (section && (section->section_mem_map & SECTION_IS_EARLY)); |
| } |
| @@ -2046,27 +2046,27 @@ static inline int valid_section_nr(unsig |
| return valid_section(__nr_to_section(nr)); |
| } |
| |
| -static inline int online_section(struct mem_section *section) |
| +static inline int online_section(const struct mem_section *section) |
| { |
| return (section && (section->section_mem_map & SECTION_IS_ONLINE)); |
| } |
| |
| #ifdef CONFIG_ZONE_DEVICE |
| -static inline int online_device_section(struct mem_section *section) |
| +static inline int online_device_section(const struct mem_section *section) |
| { |
| unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; |
| |
| return section && ((section->section_mem_map & flags) == flags); |
| } |
| #else |
| -static inline int online_device_section(struct mem_section *section) |
| +static inline int online_device_section(const struct mem_section *section) |
| { |
| return 0; |
| } |
| #endif |
| |
| #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT |
| -static inline int preinited_vmemmap_section(struct mem_section *section) |
| +static inline int preinited_vmemmap_section(const struct mem_section *section) |
| { |
| return (section && |
| (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); |
| @@ -2076,7 +2076,7 @@ void sparse_vmemmap_init_nid_early(int n |
| void sparse_vmemmap_init_nid_late(int nid); |
| |
| #else |
| -static inline int preinited_vmemmap_section(struct mem_section *section) |
| +static inline int preinited_vmemmap_section(const struct mem_section *section) |
| { |
| return 0; |
| } |
| _ |