From: "Alex Shi (tencent)" <alexs@xxxxxxxxxx> Compound page is checked and skipped before write_protect_page() called, use folio to save few compound_head checking and also remove duplicated compound checking again in the func. Signed-off-by: Alex Shi (tencent) <alexs@xxxxxxxxxx> Cc: Izik Eidus <izik.eidus@xxxxxxxxxxxxxxxxxx> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx> Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx> Cc: Hugh Dickins <hughd@xxxxxxxxxx> Cc: Chris Wright <chrisw@xxxxxxxxxxxx> --- mm/ksm.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 165a3e4162bf..ad3a0294a2ec 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1275,23 +1275,21 @@ static u32 calc_checksum(struct page *page) return checksum; } -static int write_protect_page(struct vm_area_struct *vma, struct page *page, +static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); + DEFINE_PAGE_VMA_WALK(pvmw, &folio->page, vma, 0, 0); int swapped; int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; pte_t entry; - pvmw.address = page_address_in_vma(page, vma); + pvmw.address = page_address_in_vma(&folio->page, vma); if (pvmw.address == -EFAULT) goto out; - BUG_ON(PageTransCompound(page)); - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, pvmw.address + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); @@ -1301,12 +1299,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; - anon_exclusive = PageAnonExclusive(page); + anon_exclusive = PageAnonExclusive(&folio->page); entry = ptep_get(pvmw.pte); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { - swapped = PageSwapCache(page); - flush_cache_page(vma, pvmw.address, page_to_pfn(page)); + swapped = folio_test_swapcache(folio); + flush_cache_page(vma, pvmw.address, folio_pfn(folio)); /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make @@ -1326,20 +1324,20 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, * Check that no O_DIRECT or similar I/O is in progress on the * page */ - if (page_mapcount(page) + 1 + swapped != page_count(page)) { + if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (anon_exclusive && - folio_try_share_anon_rmap_pte(page_folio(page), page)) { + folio_try_share_anon_rmap_pte(folio, &folio->page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } if (pte_dirty(entry)) - set_page_dirty(page); + folio_mark_dirty(folio); entry = pte_mkclean(entry); if (pte_write(entry)) @@ -1505,7 +1503,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ - if (write_protect_page(vma, page, &orig_pte) == 0) { + if (write_protect_page(vma, (struct folio *)page, &orig_pte) == 0) { if (!kpage) { /* * While we hold page lock, upgrade page from -- 2.43.0