/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_H
#define _MM_SWAP_H

struct mempolicy;
struct swap_iocb;

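/*
 * Swap readahead reads up to 1 << page_cluster pages around a swap fault;
 * tunable via /proc/sys/vm/page-cluster.
 */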
extern int page_cluster;

#ifdef CONFIG_SWAP
#include <linux/swapops.h> /* for swp_offset */
#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
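/*
 * Sketch of the intended calling pattern: readahead-style callers keep a
 * NULL-initialised plug, pass it to each read, and unplug once at the end
 * so the reads can be submitted as one batch:
 *
 *	struct swap_iocb *splug = NULL;
 *
 *	swap_read_folio(folio, &splug);
 *	...
 *	swap_read_unplug(splug);
 */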
void swap_write_unplug(struct swap_iocb *sio);
int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
#define SWAP_ADDRESS_SPACE_MASK		(SWAP_ADDRESS_SPACE_PAGES - 1)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
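/*
 * Worked example (assuming 4 KiB pages): one address_space spans
 * 1 << 14 == 16384 slots == 64 MiB of swap.  swapper_spaces[type] points
 * to an array of such address_spaces for that device, and
 * swap_address_space() selects one by offset >> SWAP_ADDRESS_SPACE_SHIFT.
 */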

/*
 * Return the swap device position of the swap entry.
 */
static inline loff_t swap_dev_pos(swp_entry_t entry)
{
	return ((loff_t)swp_offset(entry)) << PAGE_SHIFT;
}
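/* E.g. with 4 KiB pages, swap offset 3 is byte position 3 << 12 == 12288. */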

/*
 * Return the swap cache index of the swap entry.
 */
static inline pgoff_t swap_cache_index(swp_entry_t entry)
{
	BUILD_BUG_ON((SWP_OFFSET_MASK | SWAP_ADDRESS_SPACE_MASK) != SWP_OFFSET_MASK);
	return swp_offset(entry) & SWAP_ADDRESS_SPACE_MASK;
}
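/*
 * The two helpers above pair up when probing the swap cache, e.g. (sketch):
 *
 *	folio = filemap_get_folio(swap_address_space(entry),
 *				  swap_cache_index(entry));
 *
 * i.e. pick the per-64M address_space first, then index within it.
 */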

void show_swap_cache_info(void);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				  unsigned long end);
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index);

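/*
 * Readahead entry points: swapin_readahead() decides between VMA-based
 * readahead and cluster (swap-offset based) readahead, the latter being
 * what swap_cluster_readahead() implements.
 */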
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug);
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
		struct mempolicy *mpol, pgoff_t ilx);
struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
		struct vm_fault *vmf);

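/* SWP_* flags of the swap device backing this folio's swap entry. */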
static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return swp_swap_info(folio->swap)->flags;
}

/*
 * Return the count of contiguous swap entries that share the same
 * zeromap status as the starting entry. If is_zeromap is not NULL,
 * the zeromap status of the starting entry is also stored through it.
 */
static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
		bool *is_zeromap)
{
	struct swap_info_struct *sis = swp_swap_info(entry);
	unsigned long start = swp_offset(entry);
	unsigned long end = start + max_nr;
	bool first_bit;

	first_bit = test_bit(start, sis->zeromap);
	if (is_zeromap)
		*is_zeromap = first_bit;

	if (max_nr <= 1)
		return max_nr;
	if (first_bit)
		return find_next_zero_bit(sis->zeromap, end, start) - start;
	else
		return find_next_bit(sis->zeromap, end, start) - start;
}
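/*
 * Sketch of intended use: when swapping in nr pages starting at @entry,
 * callers check that swap_zeromap_batch(entry, nr, &is_zeromap) covers the
 * whole range, so one folio never mixes zero-filled slots with slots that
 * need real I/O.
 */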

static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
	struct swap_info_struct *si = swp_swap_info(entry);
	pgoff_t offset = swp_offset(entry);
	int i;

	/*
	 * While allocating a large folio and doing mTHP swapin, we need to
	 * ensure none of the entries is already in the swap cache; otherwise
	 * the mTHP folio would conflict with the folio already cached there.
	 */
	for (i = 0; i < max_nr; i++) {
		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
			return i;
	}

	return i;
}
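/*
 * Example: a 16-page mTHP swapin may bypass the swap cache only if
 * non_swapcache_batch(entry, 16) == 16, i.e. none of the 16 slots has
 * SWAP_HAS_CACHE set.
 */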

#else /* CONFIG_SWAP */
struct swap_iocb;
static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
}
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}

static inline pgoff_t swap_cache_index(swp_entry_t entry)
{
	return 0;
}

static inline void show_swap_cache_info(void)
{
}

static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
	return NULL;
}

static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writeout(struct folio *folio,
		struct swap_iocb **swap_plug)
{
	return 0;
}

static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
}

static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	return filemap_get_folio(mapping, index);
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
		gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct folio *folio,
		swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct folio *folio)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
		unsigned long end)
{
}

static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}

static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
		bool *has_zeromap)
{
	return 0;
}

static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
	return 0;
}
#endif /* CONFIG_SWAP */

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the folio is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swap_cache_index(folio->swap);
	return folio->index;
}

#endif /* _MM_SWAP_H */