folio_mkclean() already passes down a head page, so convert it
back to a folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/rmap.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index d62a6fcef318..18ae6bd79efd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -931,6 +931,7 @@ int page_referenced(struct page *page,
 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct page_vma_mapped_walk pvmw = {
 		.vma = vma,
 		.address = address,
@@ -942,7 +943,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	pvmw_set_page(&pvmw, page);
 	/*
 	 * We have to assume the worse case ie pmd for invalidation. Note that
-	 * the page can not be free from this function.
+	 * the folio can not be freed from this function.
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
@@ -974,14 +975,14 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 				continue;
 
-			flush_cache_page(vma, address, page_to_pfn(page));
+			flush_cache_page(vma, address, folio_pfn(folio));
 			entry = pmdp_invalidate(vma, address, pmd);
 			entry = pmd_wrprotect(entry);
 			entry = pmd_mkclean(entry);
 			set_pmd_at(vma->vm_mm, address, pmd, entry);
 			ret = 1;
 #else
-			/* unexpected pmd-mapped page? */
+			/* unexpected pmd-mapped folio? */
 			WARN_ON_ONCE(1);
 #endif
 		}
-- 
2.34.1
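
For readers less familiar with the folio API, the patch follows the usual
first step when folio-izing a function: derive the folio from the page once
on entry, then switch per-page helpers to their folio equivalents. A minimal
sketch of the idiom, illustrative only (the function name is hypothetical;
only the core helpers from <linux/mm.h> are assumed):

	#include <linux/mm.h>	/* page_folio(), folio_pfn() */

	static unsigned long example_pfn(struct page *page)
	{
		/* page_folio() resolves any page, head or tail, to its folio. */
		struct folio *folio = page_folio(page);

		/*
		 * For a head page, folio_pfn(folio) returns the same value as
		 * page_to_pfn(page), so the flush_cache_page() change above is
		 * behaviour-preserving.
		 */
		return folio_pfn(folio);
	}

Since the caller only ever passes down a head page here, folio_pfn(folio)
and page_to_pfn(page) agree, and the conversion is a type-level cleanup
rather than a functional change.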