With damon_get_folio(), let's convert damon_young_pmd_entry() to use
folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/damon/vaddr.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 15f03df66db6..29227b7a6032 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -431,7 +431,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
-	struct page *page;
+	struct folio *folio;
 	struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -446,16 +446,16 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 			spin_unlock(ptl);
 			goto regular_page;
 		}
-		page = damon_get_page(pmd_pfn(*pmd));
-		if (!page)
+		folio = damon_get_folio(pmd_pfn(*pmd));
+		if (!folio)
 			goto huge_out;
-		if (pmd_young(*pmd) || !page_is_idle(page) ||
+		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
 		    mmu_notifier_test_young(walk->mm, addr)) {
 			*priv->page_sz = HPAGE_PMD_SIZE;
 			priv->young = true;
 		}
-		put_page(page);
+		folio_put(folio);
 huge_out:
 		spin_unlock(ptl);
 		return 0;
@@ -469,15 +469,15 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	if (!pte_present(*pte))
 		goto out;
-	page = damon_get_page(pte_pfn(*pte));
-	if (!page)
+	folio = damon_get_folio(pte_pfn(*pte));
+	if (!folio)
 		goto out;
-	if (pte_young(*pte) || !page_is_idle(page) ||
+	if (pte_young(*pte) || !folio_test_idle(folio) ||
 	    mmu_notifier_test_young(walk->mm, addr)) {
 		*priv->page_sz = PAGE_SIZE;
 		priv->young = true;
 	}
-	put_page(page);
+	folio_put(folio);
 out:
 	pte_unmap_unlock(pte, ptl);
 	return 0;
-- 
2.35.3
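
Note for readers following the series: the conversion above relies on
damon_get_folio() (introduced by an earlier patch in this series)
returning a reference-counted folio, or NULL if the pfn has no usable
folio. A minimal caller-side sketch of that assumed contract is below;
damon_folio_accessed() is a hypothetical helper used purely for
illustration and is not part of this patch.

/*
 * Illustration only: shows the contract the conversion assumes.
 * damon_get_folio() returns a folio with an extra reference (or NULL),
 * and the caller must drop that reference with folio_put().
 */
static bool damon_folio_accessed(unsigned long pfn)
{
	struct folio *folio = damon_get_folio(pfn);
	bool young;

	if (!folio)
		return false;
	/* folio_test_idle() is the folio equivalent of page_is_idle() */
	young = !folio_test_idle(folio);
	folio_put(folio);
	return young;
}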