This assumption needs the inverse of nth_page(), which is temporarily
named page_nth() until it's renamed later in this series.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: John Hubbard <jhubbard@xxxxxxxxxx>
Reviewed-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Reviewed-by: William Kucharski <william.kucharski@xxxxxxxxxx>
---
 include/linux/mm.h | 2 ++
 mm/gup.c           | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 213cc569b192..e679a7d66200 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -216,8 +216,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#define page_nth(head, tail) (page_to_pfn(tail) - page_to_pfn(head))
 #else
 #define nth_page(page,n) ((page) + (n))
+#define page_nth(head, tail) ((tail) - (head))
 #endif
 
 /* to align the pointer to the (next) page boundary */
diff --git a/mm/gup.c b/mm/gup.c
index 29a8021f10a2..fa75b71820a2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -263,8 +263,8 @@ static inline struct page *compound_range_next(struct page *start,
 	next = nth_page(start, i);
 	page = compound_head(next);
 	if (PageHead(page))
-		nr = min_t(unsigned int,
-			page + compound_nr(page) - next, npages - i);
+		nr = min_t(unsigned int, npages - i,
+			   compound_nr(page) - page_nth(page, next));
 
 	*ntails = nr;
 	return page;
-- 
2.34.1
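
As a rough illustration of the relationship the new helper encodes, here is a
minimal userspace sketch (not kernel code, and not part of the patch) that
mirrors the non-SPARSEMEM definitions above; the mock struct page and the
memmap array are stand-ins used only for this example:

/* Userspace sketch only: struct page here is a mock, not the kernel's. */
#include <assert.h>
#include <stdio.h>

struct page { unsigned long flags; };

/* Mirror the !SPARSEMEM (or SPARSEMEM_VMEMMAP) case: plain pointer math. */
#define nth_page(page, n)	((page) + (n))
#define page_nth(head, tail)	((tail) - (head))

int main(void)
{
	struct page memmap[512];		/* pretend memmap of one order-9 compound page */
	struct page *head = &memmap[0];
	unsigned long compound_nr = 512;	/* pages in the compound page */

	for (unsigned long n = 0; n < compound_nr; n++) {
		struct page *tail = nth_page(head, n);

		/* page_nth() recovers the offset that nth_page() applied. */
		assert((unsigned long)page_nth(head, tail) == n);
	}

	/*
	 * The gup.c hunk relies on the same identity: starting from a page
	 * @next that lies inside the compound page, the pages left in it are
	 * compound_nr - page_nth(head, next).
	 */
	struct page *next = nth_page(head, 100);
	assert(compound_nr - page_nth(head, next) == 412);

	printf("page_nth() inverts nth_page() for this memmap\n");
	return 0;
}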