Let's convert copy_nonpresent_pte(). While at it, perform some more folio
conversion.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 mm/memory.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index eaab6a2e14eba..ad6da8168e461 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -781,6 +781,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long vm_flags = dst_vma->vm_flags;
 	pte_t orig_pte = ptep_get(src_pte);
 	pte_t pte = orig_pte;
+	struct folio *folio;
 	struct page *page;
 	swp_entry_t entry = pte_to_swp_entry(orig_pte);
 
@@ -825,6 +826,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		}
 	} else if (is_device_private_entry(entry)) {
 		page = pfn_swap_entry_to_page(entry);
+		folio = page_folio(page);
 
 		/*
 		 * Update rss count even for unaddressable pages, as
@@ -835,10 +837,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * for unaddressable pages, at some point. But for now
 		 * keep things as they are.
 		 */
-		get_page(page);
+		folio_get(folio);
 		rss[mm_counter(page)]++;
 		/* Cannot fail as these pages cannot get pinned. */
-		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
 		/*
 		 * We do not preserve soft-dirty information, because so
@@ -952,7 +954,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	 * future.
 	 */
 	folio_get(folio);
-	if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+	if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
 		/* Page may be pinned, we have to copy. */
 		folio_put(folio);
 		return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
-- 
2.41.0
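
For context, the calling convention this conversion relies on, sketched as a
standalone helper. This is not part of the patch and dup_anon_folio_pte() is
an illustrative name, not a kernel function; only the folio_get()/
folio_try_dup_anon_rmap_pte()/folio_put() calls are the APIs the patch uses,
following the pattern visible in the copy_present_pte() hunk above:

	/* Assumes <linux/mm.h> and <linux/rmap.h> for the types and APIs. */
	static inline int dup_anon_folio_pte(struct folio *folio,
					     struct page *page,
					     struct vm_area_struct *src_vma)
	{
		/* Take a folio reference for the child's mapping first. */
		folio_get(folio);
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page,
							 src_vma))) {
			/*
			 * The anon folio may be pinned, so its rmap must not
			 * be duplicated into the child: drop the reference
			 * and let the caller copy the page instead.
			 */
			folio_put(folio);
			return -EBUSY;
		}
		return 0;
	}

In the device-private branch of copy_nonpresent_pte(), such pages cannot get
pinned, so the call cannot fail there and the return value is ignored, which
is why the old BUG_ON() could simply be dropped.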