| From: Max Kellermann <max.kellermann@ionos.com> |
| Subject: mm: constify pagemap related test/getter functions |
| Date: Mon, 1 Sep 2025 22:50:11 +0200 |
| |
| For improved const-correctness. |
| |
We select certain test/getter functions whose implementations call only
each other, functions that are already const-qualified, or no other
functions at all.
| |
It is therefore relatively trivial to const-ify them, which provides a
basis for const-ifying callers higher up the call stack.
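
For illustration, a minimal standalone sketch of the pattern (hypothetical
simplified type and function names, not the kernel's own definitions):

#include <stdio.h>

struct mapping {	/* hypothetical stand-in for struct address_space */
	unsigned long flags;
};

/* Leaf test function: it only reads *m, so the pointer can be const. */
static int mapping_is_exiting(const struct mapping *m)
{
	return m->flags & 1;
}

/* Once the leaf takes a const pointer, a read-only caller can too. */
static int mapping_is_usable(const struct mapping *m)
{
	return !mapping_is_exiting(m);
}

int main(void)
{
	const struct mapping m = { .flags = 0 };

	printf("usable: %d\n", mapping_is_usable(&m));
	return 0;
}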
| |
| Link: https://lkml.kernel.org/r/20250901205021.3573313-3-max.kellermann@ionos.com |
| Signed-off-by: Max Kellermann <max.kellermann@ionos.com> |
| Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> |
| Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> |
| Acked-by: David Hildenbrand <david@redhat.com> |
| Acked-by: Vlastimil Babka <vbabka@suse.cz> |
| Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org> |
| Acked-by: Shakeel Butt <shakeel.butt@linux.dev> |
| Cc: Alexander Gordeev <agordeev@linux.ibm.com> |
| Cc: Al Viro <viro@zeniv.linux.org.uk> |
| Cc: Andreas Larsson <andreas@gaisler.com> |
| Cc: Andy Lutomirski <luto@kernel.org> |
| Cc: Axel Rasmussen <axelrasmussen@google.com> |
| Cc: Baolin Wang <baolin.wang@linux.alibaba.com> |
Cc: Borislav Petkov <bp@alien8.de>
| Cc: Christian Borntraeger <borntraeger@linux.ibm.com> |
| Cc: Christian Brauner <brauner@kernel.org> |
| Cc: Christian Zankel <chris@zankel.net> |
| Cc: David Rientjes <rientjes@google.com> |
| Cc: David S. Miller <davem@davemloft.net> |
| Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> |
| Cc: Heiko Carstens <hca@linux.ibm.com> |
| Cc: Helge Deller <deller@gmx.de> |
| Cc: "H. Peter Anvin" <hpa@zytor.com> |
| Cc: Hugh Dickins <hughd@google.com> |
| Cc: Ingo Molnar <mingo@redhat.com> |
| Cc: James Bottomley <james.bottomley@HansenPartnership.com> |
| Cc: Jan Kara <jack@suse.cz> |
| Cc: Jocelyn Falempe <jfalempe@redhat.com> |
| Cc: Liam Howlett <liam.howlett@oracle.com> |
| Cc: Mark Brown <broonie@kernel.org> |
| Cc: Matthew Wilcox (Oracle) <willy@infradead.org> |
| Cc: Max Filippov <jcmvbkbc@gmail.com> |
| Cc: Michael Ellerman <mpe@ellerman.id.au> |
| Cc: Michal Hocko <mhocko@suse.com> |
| Cc: "Nysal Jan K.A" <nysal@linux.ibm.com> |
| Cc: Oscar Salvador <osalvador@suse.de> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
Cc: Russell King <linux@armlinux.org.uk>
| Cc: Suren Baghdasaryan <surenb@google.com> |
| Cc: Sven Schnelle <svens@linux.ibm.com> |
Cc: Thomas Gleixner <tglx@linutronix.de>
| Cc: Thomas Huth <thuth@redhat.com> |
| Cc: Vasily Gorbik <gor@linux.ibm.com> |
| Cc: Wei Xu <weixugc@google.com> |
| Cc: Yuanchu Xie <yuanchu@google.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/pagemap.h | 55 +++++++++++++++++++------------------- |
| 1 file changed, 28 insertions(+), 27 deletions(-) |
| |
| --- a/include/linux/pagemap.h~mm-constify-pagemap-related-test-getter-functions |
| +++ a/include/linux/pagemap.h |
| @@ -140,7 +140,7 @@ static inline int inode_drain_writes(str |
| return filemap_write_and_wait(inode->i_mapping); |
| } |
| |
| -static inline bool mapping_empty(struct address_space *mapping) |
| +static inline bool mapping_empty(const struct address_space *mapping) |
| { |
| return xa_empty(&mapping->i_pages); |
| } |
| @@ -166,7 +166,7 @@ static inline bool mapping_empty(struct |
| * refcount and the referenced bit, which will be elevated or set in |
| * the process of adding new cache pages to an inode. |
| */ |
| -static inline bool mapping_shrinkable(struct address_space *mapping) |
| +static inline bool mapping_shrinkable(const struct address_space *mapping) |
| { |
| void *head; |
| |
| @@ -267,7 +267,7 @@ static inline void mapping_clear_unevict |
| clear_bit(AS_UNEVICTABLE, &mapping->flags); |
| } |
| |
| -static inline bool mapping_unevictable(struct address_space *mapping) |
| +static inline bool mapping_unevictable(const struct address_space *mapping) |
| { |
| return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); |
| } |
| @@ -277,7 +277,7 @@ static inline void mapping_set_exiting(s |
| set_bit(AS_EXITING, &mapping->flags); |
| } |
| |
| -static inline int mapping_exiting(struct address_space *mapping) |
| +static inline int mapping_exiting(const struct address_space *mapping) |
| { |
| return test_bit(AS_EXITING, &mapping->flags); |
| } |
| @@ -287,7 +287,7 @@ static inline void mapping_set_no_writeb |
| set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); |
| } |
| |
| -static inline int mapping_use_writeback_tags(struct address_space *mapping) |
| +static inline int mapping_use_writeback_tags(const struct address_space *mapping) |
| { |
| return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); |
| } |
| @@ -333,7 +333,7 @@ static inline void mapping_set_inaccessi |
| set_bit(AS_INACCESSIBLE, &mapping->flags); |
| } |
| |
| -static inline bool mapping_inaccessible(struct address_space *mapping) |
| +static inline bool mapping_inaccessible(const struct address_space *mapping) |
| { |
| return test_bit(AS_INACCESSIBLE, &mapping->flags); |
| } |
| @@ -343,18 +343,18 @@ static inline void mapping_set_writeback |
| set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags); |
| } |
| |
| -static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping) |
| +static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping) |
| { |
| return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags); |
| } |
| |
| -static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
| +static inline gfp_t mapping_gfp_mask(const struct address_space *mapping) |
| { |
| return mapping->gfp_mask; |
| } |
| |
| /* Restricts the given gfp_mask to what the mapping allows. */ |
| -static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, |
| +static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping, |
| gfp_t gfp_mask) |
| { |
| return mapping_gfp_mask(mapping) & gfp_mask; |
| @@ -477,7 +477,7 @@ mapping_min_folio_order(const struct add |
| } |
| |
| static inline unsigned long |
| -mapping_min_folio_nrpages(struct address_space *mapping) |
| +mapping_min_folio_nrpages(const struct address_space *mapping) |
| { |
| return 1UL << mapping_min_folio_order(mapping); |
| } |
| @@ -491,7 +491,7 @@ mapping_min_folio_nrpages(struct address |
| * new folio to the page cache and need to know what index to give it, |
| * call this function. |
| */ |
| -static inline pgoff_t mapping_align_index(struct address_space *mapping, |
| +static inline pgoff_t mapping_align_index(const struct address_space *mapping, |
| pgoff_t index) |
| { |
| return round_down(index, mapping_min_folio_nrpages(mapping)); |
| @@ -501,7 +501,7 @@ static inline pgoff_t mapping_align_inde |
| * Large folio support currently depends on THP. These dependencies are |
| * being worked on but are not yet fixed. |
| */ |
| -static inline bool mapping_large_folio_support(struct address_space *mapping) |
| +static inline bool mapping_large_folio_support(const struct address_space *mapping) |
| { |
| /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ |
| VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON, |
| @@ -516,7 +516,7 @@ static inline size_t mapping_max_folio_s |
| return PAGE_SIZE << mapping_max_folio_order(mapping); |
| } |
| |
| -static inline int filemap_nr_thps(struct address_space *mapping) |
| +static inline int filemap_nr_thps(const struct address_space *mapping) |
| { |
| #ifdef CONFIG_READ_ONLY_THP_FOR_FS |
| return atomic_read(&mapping->nr_thps); |
| @@ -930,7 +930,7 @@ static inline struct page *grab_cache_pa |
| * |
| * Return: The index of the folio which follows this folio in the file. |
| */ |
| -static inline pgoff_t folio_next_index(struct folio *folio) |
| +static inline pgoff_t folio_next_index(const struct folio *folio) |
| { |
| return folio->index + folio_nr_pages(folio); |
| } |
| @@ -959,7 +959,7 @@ static inline struct page *folio_file_pa |
| * e.g., shmem did not move this folio to the swap cache. |
| * Return: true or false. |
| */ |
| -static inline bool folio_contains(struct folio *folio, pgoff_t index) |
| +static inline bool folio_contains(const struct folio *folio, pgoff_t index) |
| { |
| VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio); |
| return index - folio->index < folio_nr_pages(folio); |
| @@ -1036,13 +1036,13 @@ static inline loff_t page_offset(struct |
| /* |
| * Get the offset in PAGE_SIZE (even for hugetlb folios). |
| */ |
| -static inline pgoff_t folio_pgoff(struct folio *folio) |
| +static inline pgoff_t folio_pgoff(const struct folio *folio) |
| { |
| return folio->index; |
| } |
| |
| -static inline pgoff_t linear_page_index(struct vm_area_struct *vma, |
| - unsigned long address) |
| +static inline pgoff_t linear_page_index(const struct vm_area_struct *vma, |
| + const unsigned long address) |
| { |
| pgoff_t pgoff; |
| pgoff = (address - vma->vm_start) >> PAGE_SHIFT; |
| @@ -1462,7 +1462,7 @@ static inline unsigned int __readahead_b |
| * readahead_pos - The byte offset into the file of this readahead request. |
| * @rac: The readahead request. |
| */ |
| -static inline loff_t readahead_pos(struct readahead_control *rac) |
| +static inline loff_t readahead_pos(const struct readahead_control *rac) |
| { |
| return (loff_t)rac->_index * PAGE_SIZE; |
| } |
| @@ -1471,7 +1471,7 @@ static inline loff_t readahead_pos(struc |
| * readahead_length - The number of bytes in this readahead request. |
| * @rac: The readahead request. |
| */ |
| -static inline size_t readahead_length(struct readahead_control *rac) |
| +static inline size_t readahead_length(const struct readahead_control *rac) |
| { |
| return rac->_nr_pages * PAGE_SIZE; |
| } |
| @@ -1480,7 +1480,7 @@ static inline size_t readahead_length(st |
| * readahead_index - The index of the first page in this readahead request. |
| * @rac: The readahead request. |
| */ |
| -static inline pgoff_t readahead_index(struct readahead_control *rac) |
| +static inline pgoff_t readahead_index(const struct readahead_control *rac) |
| { |
| return rac->_index; |
| } |
| @@ -1489,7 +1489,7 @@ static inline pgoff_t readahead_index(st |
| * readahead_count - The number of pages in this readahead request. |
| * @rac: The readahead request. |
| */ |
| -static inline unsigned int readahead_count(struct readahead_control *rac) |
| +static inline unsigned int readahead_count(const struct readahead_control *rac) |
| { |
| return rac->_nr_pages; |
| } |
| @@ -1498,12 +1498,12 @@ static inline unsigned int readahead_cou |
| * readahead_batch_length - The number of bytes in the current batch. |
| * @rac: The readahead request. |
| */ |
| -static inline size_t readahead_batch_length(struct readahead_control *rac) |
| +static inline size_t readahead_batch_length(const struct readahead_control *rac) |
| { |
| return rac->_batch_count * PAGE_SIZE; |
| } |
| |
| -static inline unsigned long dir_pages(struct inode *inode) |
| +static inline unsigned long dir_pages(const struct inode *inode) |
| { |
| return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> |
| PAGE_SHIFT; |
| @@ -1517,8 +1517,8 @@ static inline unsigned long dir_pages(st |
| * Return: the number of bytes in the folio up to EOF, |
| * or -EFAULT if the folio was truncated. |
| */ |
| -static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio, |
| - struct inode *inode) |
| +static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio, |
| + const struct inode *inode) |
| { |
| loff_t size = i_size_read(inode); |
| pgoff_t index = size >> PAGE_SHIFT; |
| @@ -1549,7 +1549,8 @@ static inline ssize_t folio_mkwrite_chec |
| * Return: The number of filesystem blocks covered by this folio. |
| */ |
| static inline |
| -unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) |
| +unsigned int i_blocks_per_folio(const struct inode *inode, |
| + const struct folio *folio) |
| { |
| return folio_size(folio) >> inode->i_blkbits; |
| } |
| _ |