The indices array is unnecessary. Folios keep track of their xarray
indices in the folio->index field, which can simply be accessed as
needed. This change removes the indices argument from
find_lock_entries() and find_get_entries(). All of the callers are able
to remove their indices arrays as well.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
 mm/filemap.c  |  8 ++------
 mm/internal.h |  4 ++--
 mm/shmem.c    |  6 ++----
 mm/truncate.c | 12 ++++--------
 4 files changed, 10 insertions(+), 20 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 1b8022c18dc7..1f6be113a214 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2034,7 +2034,6 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
  * @start: The starting page cache index
  * @end: The final page index (inclusive).
  * @fbatch: Where the resulting entries are placed.
- * @indices: The cache indices corresponding to the entries in @entries
  *
  * find_get_entries() will search for and return a batch of entries in
  * the mapping. The entries are placed in @fbatch. find_get_entries()
@@ -2050,7 +2049,7 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
  * Also updates @start to be positioned after the last found entry
  */
 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
+		pgoff_t end, struct folio_batch *fbatch)
 {
 	XA_STATE(xas, &mapping->i_pages, *start);
 	unsigned long nr;
@@ -2058,7 +2057,6 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 
 	rcu_read_lock();
 	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
-		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
 	}
@@ -2082,7 +2080,6 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
  * @start: The starting page cache index.
  * @end: The final page index (inclusive).
  * @fbatch: Where the resulting entries are placed.
- * @indices: The cache indices of the entries in @fbatch.
  *
  * find_lock_entries() will return a batch of entries from @mapping.
 * Swap, shadow and DAX entries are included. Folios are returned
@@ -2098,7 +2095,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
  * Also updates @start to be positioned after the last found entry
  */
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
+		pgoff_t end, struct folio_batch *fbatch)
 {
 	XA_STATE(xas, &mapping->i_pages, *start);
 	unsigned long nr;
@@ -2119,7 +2116,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
 					folio);
 		}
-		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
 		continue;
diff --git a/mm/internal.h b/mm/internal.h
index 68afdbe7106e..db8d5dfa6d68 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -107,9 +107,9 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 }
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
+		pgoff_t end, struct folio_batch *fbatch);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
+		pgoff_t end, struct folio_batch *fbatch);
 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
diff --git a/mm/shmem.c b/mm/shmem.c
index 8240e066edfc..ad6b5adf04ac 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -907,7 +907,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 	struct folio_batch fbatch;
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio *folio;
 	bool same_folio;
 	long nr_swaps_freed = 0;
@@ -923,7 +922,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end && find_lock_entries(mapping, &index, end - 1,
-			&fbatch, indices)) {
+			&fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			folio = fbatch.folios[i];
 
@@ -973,8 +972,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		while (index < end) {
 			cond_resched();
 
-			if (!find_get_entries(mapping, &index, end - 1, &fbatch,
-					indices)) {
+			if (!find_get_entries(mapping, &index, end - 1, &fbatch)) {
 				/* If all gone or hole-punch or unfalloc, we're done */
 				if (index == start || end != -1)
 					break;
diff --git a/mm/truncate.c b/mm/truncate.c
index 4e63d885498a..9db247a88483 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -332,7 +332,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t		start;		/* inclusive */
 	pgoff_t		end;		/* exclusive */
 	struct folio_batch fbatch;
-	pgoff_t		indices[PAGEVEC_SIZE];
 	pgoff_t		index;
 	int		i;
 	struct folio	*folio;
@@ -361,7 +360,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end && find_lock_entries(mapping, &index, end - 1,
-			&fbatch, indices)) {
+			&fbatch)) {
 		truncate_folio_batch_exceptionals(mapping, &fbatch);
 		for (i = 0; i < folio_batch_count(&fbatch); i++)
 			truncate_cleanup_folio(fbatch.folios[i]);
@@ -399,8 +398,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	while (index < end) {
 		cond_resched();
-		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
-				indices)) {
+		if (!find_get_entries(mapping, &index, end - 1, &fbatch)) {
 			/* If all gone from start onwards, we're done */
 			if (index == start)
 				break;
@@ -497,7 +495,6 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
 unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio_batch fbatch;
 	pgoff_t index = start;
 	unsigned long ret;
@@ -505,7 +502,7 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 	int i;
 
 	folio_batch_init(&fbatch);
-	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
+	while (find_lock_entries(mapping, &index, end, &fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
@@ -620,7 +617,6 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
 int invalidate_inode_pages2_range(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio_batch fbatch;
 	pgoff_t index;
 	int i;
@@ -633,7 +629,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
+	while (find_get_entries(mapping, &index, end, &fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
-- 
2.36.1
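
For context, a converted caller loop under this change looks roughly
like the sketch below. This is illustrative rather than a hunk from the
patch: process_entry() is a hypothetical stand-in for the caller's
per-entry work, and swap, shadow and DAX entries in the batch are value
entries, so they must still be filtered with xa_is_value() before
folio->index can be dereferenced.

	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (find_get_entries(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/*
			 * Value entries are not folios and have no
			 * ->index field that can be read.
			 */
			if (xa_is_value(folio))
				continue;

			/* What used to be indices[i] is now folio->index. */
			process_entry(folio, folio->index);	/* hypothetical */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

The payoff is that each caller drops a PAGEVEC_SIZE-long pgoff_t array
from its stack frame, at the cost of recovering the index from the
entry itself.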