Change __do_huge_pmd_anonymous_page() to take a folio as input, since its
caller already has the folio. This saves one unnecessary call to
compound_head().

Signed-off-by: Jianfeng Wang <jianfeng.w.wang@xxxxxxxxxx>
---
 mm/huge_memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 89f58c7603b2..83566ee738e0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -866,10 +866,9 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
-			struct page *page, gfp_t gfp)
+			struct folio *folio, gfp_t gfp)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct folio *folio = page_folio(page);
 	pgtable_t pgtable;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	vm_fault_t ret = 0;
@@ -890,7 +889,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		goto release;
 	}
 
-	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+	clear_huge_page(&folio->page, vmf->address, HPAGE_PMD_NR);
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * clear_huge_page writes become visible before the set_pmd_at()
@@ -918,7 +917,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 			return ret;
 		}
 
-		entry = mk_huge_pmd(page, vma->vm_page_prot);
+		entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		folio_add_new_anon_rmap(folio, vma, haddr);
 		folio_add_lru_vma(folio, vma);
@@ -1051,7 +1050,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
+	return __do_huge_pmd_anonymous_page(vmf, folio, gfp);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-- 
2.42.1