The patch titled
     Subject: mm: add an 'end' parameter to find_get_entries
has been added to the -mm tree.  Its filename is
     mm-add-an-end-parameter-to-find_get_entries.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-add-an-end-parameter-to-find_get_entries.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-add-an-end-parameter-to-find_get_entries.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: mm: add an 'end' parameter to find_get_entries

This simplifies the callers and leads to a more efficient implementation
since the XArray has this functionality already.

Link: https://lkml.kernel.org/r/20201112212641.27837-11-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Jan Kara <jack@xxxxxxx>
Reviewed-by: William Kucharski <william.kucharski@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Dave Chinner <dchinner@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/pagemap.h |    4 ++--
 mm/filemap.c            |    9 +++++----
 mm/shmem.c              |   10 ++--------
 mm/swap.c               |    2 +-
 4 files changed, 10 insertions(+), 15 deletions(-)

--- a/include/linux/pagemap.h~mm-add-an-end-parameter-to-find_get_entries
+++ a/include/linux/pagemap.h
@@ -451,8 +451,8 @@ static inline struct page *find_subpage(
 }
 
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-		unsigned int nr_entries, struct page **entries,
-		pgoff_t *indices);
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
+		pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 			pgoff_t end, unsigned int nr_pages,
 			struct page **pages);
--- a/mm/filemap.c~mm-add-an-end-parameter-to-find_get_entries
+++ a/mm/filemap.c
@@ -1870,6 +1870,7 @@ reset:
  * find_get_entries - gang pagecache lookup
  * @mapping:	The address_space to search
  * @start:	The starting page cache index
+ * @end:	The final page index (inclusive).
  * @nr_entries:	The maximum number of entries
  * @entries:	Where the resulting entries are placed
  * @indices:	The cache indices corresponding to the entries in @entries
@@ -1893,9 +1894,9 @@ reset:
  *
  * Return: the number of pages and shadow entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping,
-		pgoff_t start, unsigned int nr_entries,
-		struct page **entries, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+		pgoff_t end, unsigned int nr_entries, struct page **entries,
+		pgoff_t *indices)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
 	struct page *page;
@@ -1905,7 +1906,7 @@ unsigned find_get_entries(struct address
 		return 0;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
+	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
 		/*
 		 * Terminate early on finding a THP, to allow the caller to
 		 * handle it all at once; but continue if this is hugetlbfs.
--- a/mm/shmem.c~mm-add-an-end-parameter-to-find_get_entries
+++ a/mm/shmem.c
@@ -913,8 +913,6 @@ static void shmem_undo_range(struct inod
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
 
 			if (xa_is_value(page)) {
 				if (unfalloc)
@@ -967,9 +965,8 @@ static void shmem_undo_range(struct inod
 	while (index < end) {
 		cond_resched();
 
-		pvec.nr = find_get_entries(mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				pvec.pages, indices);
+		pvec.nr = find_get_entries(mapping, index, end - 1,
+				PAGEVEC_SIZE, pvec.pages, indices);
 		if (!pvec.nr) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
@@ -982,9 +979,6 @@ static void shmem_undo_range(struct inod
 			struct page *page = pvec.pages[i];
 
 			index = indices[i];
-			if (index >= end)
-				break;
-
 			if (xa_is_value(page)) {
 				if (unfalloc)
 					continue;
--- a/mm/swap.c~mm-add-an-end-parameter-to-find_get_entries
+++ a/mm/swap.c
@@ -1102,7 +1102,7 @@ unsigned pagevec_lookup_entries(struct p
 		pgoff_t start, unsigned nr_entries,
 		pgoff_t *indices)
 {
-	pvec->nr = find_get_entries(mapping, start, nr_entries,
+	pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
 				    pvec->pages, indices);
 	return pagevec_count(pvec);
 }
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

mm-fix-readahead_page_batch-for-retry-entries.patch
mm-fix-madvise-willneed-performance-problem.patch
mm-page-flags-fix-comment.patch
mm-page_alloc-add-__free_pages-documentation.patch
mm-make-pagecache-tagged-lookups-return-only-head-pages.patch
mm-shmem-use-pagevec_lookup-in-shmem_unlock_mapping.patch
mm-swap-optimise-get_shadow_from_swap_cache.patch
mm-add-fgp_entry.patch
mm-filemap-rename-find_get_entry-to-mapping_get_entry.patch
mm-filemap-add-helper-for-finding-pages.patch
mm-filemap-add-mapping_seek_hole_data.patch
iomap-use-mapping_seek_hole_data.patch
mm-add-and-use-find_lock_entries.patch
mm-add-an-end-parameter-to-find_get_entries.patch
mm-add-an-end-parameter-to-pagevec_lookup_entries.patch
mm-remove-nr_entries-parameter-from-pagevec_lookup_entries.patch
mm-pass-pvec-directly-to-find_get_entries.patch
mm-remove-pagevec_lookup_entries.patch
mm-truncateshmem-handle-truncates-that-split-thps.patch
mm-filemap-return-only-head-pages-from-find_get_entries.patch
mm-introduce-memfd_secret-system-call-to-create-secret-memory-areas-fix.patch
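
A minimal sketch of the new calling convention, for readers following the
API change.  This is illustrative only, not part of the patch:
lookup_batch() is an invented helper, and 'end' is treated as an exclusive
bound here, as it is in shmem_undo_range().

/*
 * Sketch only: a hypothetical caller of the post-patch find_get_entries().
 * The prototype matches the include/linux/pagemap.h change above;
 * lookup_batch() does not exist in the kernel.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>

static unsigned lookup_batch(struct address_space *mapping, pgoff_t index,
		pgoff_t end,	/* exclusive bound, shmem-style */
		struct pagevec *pvec, pgoff_t *indices)
{
	/*
	 * The new 'end' argument is inclusive, so an exclusive bound becomes
	 * end - 1.  The old min(end - index, (pgoff_t)PAGEVEC_SIZE) clamp is
	 * no longer needed: ask for a full pagevec and let the XArray walk
	 * stop at the bound.
	 */
	pvec->nr = find_get_entries(mapping, index, end - 1, PAGEVEC_SIZE,
				    pvec->pages, indices);

	/* Callers with no upper bound pass ULONG_MAX, as mm/swap.c does. */
	return pagevec_count(pvec);
}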