On 1/2/22 13:57, Matthew Wilcox (Oracle) wrote:
There should be little to no effect from this patch; it just replaces
uses of some old APIs (try_grab_compound_head(), put_compound_head(),
SetPageReferenced()) with their folio equivalents.
While I'm looking at this, I also take the opportunity to use
nth_page() instead of doing the page arithmetic by hand, in case
hugetlbfs pages are ever allocated across memmap boundaries.
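For anyone who hasn't run into nth_page(): with SPARSEMEM and without
SPARSEMEM_VMEMMAP, the memmap is not one contiguous array, so "page + n"
can walk off the end of a section. Quoting the definition from
include/linux/mm.h roughly from memory:

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page, n)	pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page, n)	((page) + (n))
#endif

i.e. it indexes via the pfn in the sparse case and degenerates to plain
pointer arithmetic otherwise, so the conversion is free on most configs.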
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
mm/gup.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
Reviewed-by: John Hubbard <jhubbard@xxxxxxxxxx>
thanks,
--
John Hubbard
NVIDIA
diff --git a/mm/gup.c b/mm/gup.c
index d8535f9d5622..1c7fb668b46d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2435,7 +2435,7 @@ static int record_subpages(struct page *page, unsigned long addr,
 	int nr;
 
-	for (nr = 0; addr != end; addr += PAGE_SIZE)
-		pages[nr++] = page++;
+	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
+		pages[nr] = nth_page(page, nr);
 
 	return nr;
 }
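One thing worth calling out in this hunk: the increment belongs in the
for-header. A more literal conversion such as

	pages[nr++] = nth_page(page, nr);

would modify nr and read it again in the same expression with no
sequence point in between, which is undefined behaviour in C. Doing
"nr++, addr += PAGE_SIZE" in the header keeps nth_page(page, nr)
looking at a settled value.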
@@ -2453,7 +2453,8 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		       struct page **pages, int *nr)
 {
 	unsigned long pte_end;
-	struct page *head, *page;
+	struct page *page;
+	struct folio *folio;
 	pte_t pte;
 	int refs;
 
@@ -2469,21 +2470,20 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 	/* hugepages are never "special" */
 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-	head = pte_page(pte);
-	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+	page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
 	refs = record_subpages(page, addr, end, pages + *nr);
 
-	head = try_grab_compound_head(head, refs, flags);
-	if (!head)
+	folio = try_grab_folio(page, refs, flags);
+	if (!folio)
 		return 0;
 
 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-		put_compound_head(head, refs, flags);
+		gup_put_folio(folio, refs, flags);
 		return 0;
 	}
 
 	*nr += refs;
-	SetPageReferenced(head);
+	folio_set_referenced(folio);
 	return 1;
 }
 
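The grab/recheck/put sequence above is the standard GUP-fast pattern,
just spelled with folios now. A minimal annotated sketch of the flow,
assuming the helpers this series introduces (try_grab_folio() and
gup_put_folio()); the comments are mine, not the kernel's:

	/* Speculatively take @refs references on the folio backing @page;
	 * note we now pass the subpage and let try_grab_folio() find the
	 * containing folio, instead of computing the head page by hand. */
	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	/* GUP-fast runs without the mmap lock, so the PTE may have changed
	 * under us while we took the references; re-read and compare, and
	 * hand everything back on a mismatch. */
	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}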