On Wed, Aug 19, 2020 at 04:05:51PM +0100, Matthew Wilcox (Oracle) wrote:
> This simplifies the callers and leads to a more efficient implementation
> since the XArray has this functionality already.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
>  include/linux/pagemap.h |  4 ++--
>  mm/filemap.c            |  9 +++++----
>  mm/shmem.c              | 10 ++++------
>  mm/swap.c               |  2 +-
>  4 files changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 7de11dcd534d..3f0dc8d00f2a 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -387,8 +387,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
>  struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
>  struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
>  unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
> -                unsigned int nr_entries, struct page **entries,
> -                pgoff_t *indices);
> +                pgoff_t end, unsigned int nr_entries, struct page **entries,
> +                pgoff_t *indices);
>  unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
>                          pgoff_t end, unsigned int nr_pages,
>                          struct page **pages);
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 1aaea26556cc..159cf3d6f1ae 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1742,6 +1742,7 @@ EXPORT_SYMBOL(pagecache_get_page);
>   * find_get_entries - gang pagecache lookup
>   * @mapping: The address_space to search
>   * @start: The starting page cache index
> + * @end: The highest page cache index to return.

Maybe add here whether 'end' is inclusive or exclusive?

>   * @nr_entries: The maximum number of entries
>   * @entries: Where the resulting entries are placed
>   * @indices: The cache indices corresponding to the entries in @entries
> @@ -1765,9 +1766,9 @@ EXPORT_SYMBOL(pagecache_get_page);
>   *
>   * Return: the number of pages and shadow entries which were found.
>   */
> -unsigned find_get_entries(struct address_space *mapping,
> -                pgoff_t start, unsigned int nr_entries,
> -                struct page **entries, pgoff_t *indices)
> +unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
> +                pgoff_t end, unsigned int nr_entries, struct page **entries,
> +                pgoff_t *indices)
>  {
>          XA_STATE(xas, &mapping->i_pages, start);
>          struct page *page;
> @@ -1777,7 +1778,7 @@ unsigned find_get_entries(struct address_space *mapping,
>                  return 0;
> 
>          rcu_read_lock();
> -        xas_for_each(&xas, page, ULONG_MAX) {
> +        xas_for_each(&xas, page, end) {
>                  if (xas_retry(&xas, page))
>                          continue;
>                  /*
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 0f9f149f4b5e..abdbe61a1aa7 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -906,9 +906,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
>          pagevec_init(&pvec);
>          index = start;
>          while (index < end) {
> -                pvec.nr = find_get_entries(mapping, index,
> -                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
> -                        pvec.pages, indices);
> +                pvec.nr = find_get_entries(mapping, index, end - 1,
> +                                PAGEVEC_SIZE, pvec.pages, indices);
>                  if (!pvec.nr)
>                          break;
>                  for (i = 0; i < pagevec_count(&pvec); i++) {
> @@ -977,9 +976,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
>          while (index < end) {
>                  cond_resched();
> 
> -                pvec.nr = find_get_entries(mapping, index,
> -                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
> -                                pvec.pages, indices);
> +                pvec.nr = find_get_entries(mapping, index, end - 1,
> +                                PAGEVEC_SIZE, pvec.pages, indices);
>                  if (!pvec.nr) {
>                          /* If all gone or hole-punch or unfalloc, we're done */
>                          if (index == start || end != -1)
> diff --git a/mm/swap.c b/mm/swap.c
> index d16d65d9b4e0..fcf6ccb94b09 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -1060,7 +1060,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
>                  pgoff_t start, unsigned nr_entries,
>                  pgoff_t *indices)
>  {
> -        pvec->nr = find_get_entries(mapping, start, nr_entries,
> +        pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
>                          pvec->pages, indices);
>          return pagevec_count(pvec);
>  }
> -- 
> 2.28.0
> 
> 

-- 
Sincerely yours,
Mike.
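
For illustration only, here is a minimal sketch of a caller written against the
new signature, assuming 'end' is inclusive (which is how xas_for_each() treats
its limit, and which matches the shmem hunks above passing 'end - 1'). The
helper name walk_range() is made up for this example and is not part of the
patch:

        #include <linux/pagemap.h>
        #include <linux/pagevec.h>

        /*
         * Hypothetical helper, not part of the patch: visit every page cache
         * entry (pages and shadow/value entries) in the inclusive range
         * [start, end].
         */
        static void walk_range(struct address_space *mapping, pgoff_t start,
                        pgoff_t end)
        {
                struct pagevec pvec;
                pgoff_t indices[PAGEVEC_SIZE];
                pgoff_t index = start;

                pagevec_init(&pvec);
                while (index <= end) {
                        /*
                         * With this patch, entries past 'end' are never
                         * returned, so the caller no longer has to clamp
                         * nr_entries itself.
                         */
                        pvec.nr = find_get_entries(mapping, index, end,
                                        PAGEVEC_SIZE, pvec.pages, indices);
                        if (!pvec.nr)
                                break;
                        /* indices[] holds the index of each returned entry */
                        index = indices[pvec.nr - 1] + 1;
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        if (!index)     /* wrapped past ULONG_MAX */
                                break;
                }
        }

If 'end' were meant to be exclusive instead, the shmem callers above would
pass 'end' rather than 'end - 1', so spelling the convention out in the
kerneldoc would help future callers.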