From: Yu Zhao <yuzhao@google.com>
Subject: mm-truncate-batch-clear-shadow-entries-v2
Date: Wed, 10 Jul 2024 00:09:33 -0600

Restore the comment, and rename clear_shadow_entry() to clear_shadow_entries() since it now clears a batch of entries.

Link: https://lkml.kernel.org/r/20240710060933.3979380-1-yuzhao@google.com
Reported-by: Bharata B Rao <bharata@amd.com>
Closes: https://lore.kernel.org/d2841226-e27b-4d3d-a578-63587a3aa4f3@amd.com/
Tested-by: Bharata B Rao <bharata@amd.com>
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 mm/truncate.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

--- a/mm/truncate.c~mm-truncate-batch-clear-shadow-entries-v2
+++ a/mm/truncate.c
@@ -39,11 +39,12 @@ static inline void __clear_shadow_entry(
 	xas_store(&xas, NULL);
 }
 
-static void clear_shadow_entry(struct address_space *mapping,
-		struct folio_batch *fbatch, pgoff_t *indices)
+static void clear_shadow_entries(struct address_space *mapping,
+		struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i;
 
+	/* Handled by shmem itself, or for DAX we do nothing. */
 	if (shmem_mapping(mapping) || dax_mapping(mapping))
 		return;
 
@@ -507,7 +508,7 @@ unsigned long mapping_try_invalidate(str
 	}
 
 	if (xa_has_values)
-		clear_shadow_entry(mapping, &fbatch, indices);
+		clear_shadow_entries(mapping, &fbatch, indices);
 
 	folio_batch_remove_exceptionals(&fbatch);
 	folio_batch_release(&fbatch);
@@ -657,7 +658,7 @@ int invalidate_inode_pages2_range(struct
 	}
 
 	if (xa_has_values)
-		clear_shadow_entry(mapping, &fbatch, indices);
+		clear_shadow_entries(mapping, &fbatch, indices);
 
 	folio_batch_remove_exceptionals(&fbatch);
 	folio_batch_release(&fbatch);
_
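
For readers following along, below is a sketch of how clear_shadow_entries()
plausibly reads once this fixup is folded into the parent batching patch.
Only the signature, the early return, and the restored comment appear in the
hunks above; the single locked pass over the folio_batch (the i_lock and
i_pages locking, the xa_is_value() test, and the inode_add_lru() call) is
reconstructed from context and should be treated as an assumption, not as
the applied diff.

	/*
	 * Illustrative sketch only, not the applied patch: how
	 * clear_shadow_entries() might look with this fixup folded in.
	 * The locking and the loop body are assumptions reconstructed
	 * from the parent batching patch, not shown in this diff.
	 */
	static void clear_shadow_entries(struct address_space *mapping,
			struct folio_batch *fbatch, pgoff_t *indices)
	{
		int i;

		/* Handled by shmem itself, or for DAX we do nothing. */
		if (shmem_mapping(mapping) || dax_mapping(mapping))
			return;

		/* Assumed: one lock/unlock cycle covers the whole batch. */
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/* Shadow entries are stored as xarray values. */
			if (xa_is_value(folio))
				__clear_shadow_entry(mapping, indices[i], folio);
		}

		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}

The point of the batch interface, and hence the plural name, is that callers
which previously took and dropped the locks once per shadow entry can now do
one pass over the whole folio_batch under a single lock/unlock cycle.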