Let's convert restore_exclusive_pte() and do_swap_page(). While at it,
perform some folio conversion in restore_exclusive_pte().

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 mm/memory.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index be7fe58f7c297..9543b6e2b749b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -706,6 +706,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 				  struct page *page, unsigned long address,
 				  pte_t *ptep)
 {
+	struct folio *folio = page_folio(page);
 	pte_t orig_pte;
 	pte_t pte;
 	swp_entry_t entry;
@@ -721,14 +722,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 	else if (is_writable_device_exclusive_entry(entry))
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 
-	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+	VM_BUG_ON_FOLIO(pte_write(pte) && !(folio_test_anon(folio) &&
+			PageAnonExclusive(page)), folio);
 
 	/*
 	 * No need to take a page reference as one was already
 	 * created when the swap entry was made.
 	 */
-	if (PageAnon(page))
-		page_add_anon_rmap(page, vma, address, RMAP_NONE);
+	if (folio_test_anon(folio))
+		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
 	else
 		/*
 		 * Currently device exclusive access only supports anonymous
@@ -4065,7 +4067,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		page_add_new_anon_rmap(page, vma, vmf->address);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+		folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+					rmap_flags);
 	}
 
 	VM_BUG_ON(!folio_test_anon(folio) ||
-- 
2.41.0
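
A note on the VM_BUG_ON_FOLIO() conversion above: the new assertion must
keep the exact meaning of the removed line, i.e. !(PageAnon(page) &&
PageAnonExclusive(page)) becomes !(folio_test_anon(folio) &&
PageAnonExclusive(page)), or equivalently, by De Morgan,
!folio_test_anon(folio) || !PageAnonExclusive(page). The following
standalone userspace sketch (not part of the patch, not kernel code; the
bool inputs merely model the results of the three predicates) exhaustively
checks that equivalence:

/* equiv_check.c - build: cc -o equiv_check equiv_check.c && ./equiv_check */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/*
	 * w models pte_write(pte), a models folio_test_anon(folio)
	 * (same answer as the old PageAnon(page) on the head page),
	 * and x models PageAnonExclusive(page).
	 */
	for (unsigned int bits = 0; bits < 8; bits++) {
		bool w = bits & 1, a = bits & 2, x = bits & 4;

		/* Shape shared by the removed and added assertions. */
		bool original = w && !(a && x);
		/* De Morgan expansion of the same condition. */
		bool expanded = w && (!a || !x);

		assert(original == expanded);
	}
	printf("both assertion conditions agree on all 8 inputs\n");
	return 0;
}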
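
The calling-convention change itself is mechanical: the caller resolves the
folio once via page_folio() and then passes both the folio and the precise
page, so per-page state can still be consulted. Below is a minimal userspace
model of that pattern; the struct definitions and function bodies are stubs
invented for illustration and stand in for the real kernel structures and
for the real folio_add_anon_rmap_pte() implementation in mm/rmap.c:

#include <stdio.h>

/* Stub types standing in for the kernel's structures. */
struct folio { const char *name; };
struct page { struct folio *folio; long idx; };
struct vm_area_struct { int dummy; };
typedef unsigned int rmap_t;
#define RMAP_NONE 0

/* Stand-in for the kernel's page_folio(): page -> containing folio. */
static struct folio *page_folio(struct page *page)
{
	return page->folio;
}

/*
 * Folio-first interface shape: the folio is resolved once by the
 * caller, while the exact page is still passed alongside it.
 */
static void folio_add_anon_rmap_pte(struct folio *folio, struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long address, rmap_t flags)
{
	printf("map page %ld of folio %s at %#lx (flags=%u)\n",
	       page->idx, folio->name, address, flags);
}

int main(void)
{
	struct folio f = { .name = "anon-folio" };
	struct page p = { .folio = &f, .idx = 0 };
	struct vm_area_struct vma = { 0 };

	/* Caller pattern after the conversion: resolve the folio once. */
	struct folio *folio = page_folio(&p);

	folio_add_anon_rmap_pte(folio, &p, &vma, 0x7f0000001000UL, RMAP_NONE);
	return 0;
}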