Previously, an mTHP could only be mapped into a range whose PTEs were all
pte_none. With this change, PTEs that map the shared zeropage are also
treated as pte_none, so such a range can be remapped to a new mTHP on a
write fault, providing more opportunities to take advantage of mTHP.

Signed-off-by: Mingzhe Yang <mingzhe.yang@xxxxxx>
Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
---
 mm/memory.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 4e148309b3e0..99ec75c6f0fe 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4815,7 +4815,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	order = highest_order(orders);
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-		if (pte_range_none(pte + pte_index(addr), 1 << order))
+		if (pte_range_none_or_zeropfn(pte + pte_index(addr), 1 << order,
+					      NULL))
 			break;
 		order = next_order(&orders, order);
 	}
@@ -4867,6 +4868,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address;
+	bool any_zeropfn = false;
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	int nr_pages = 1;
@@ -4939,7 +4941,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
 		update_mmu_tlb(vma, addr, vmf->pte);
 		goto release;
-	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+	} else if (nr_pages > 1 && !pte_range_none_or_zeropfn(
+			vmf->pte, nr_pages, &any_zeropfn)) {
 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
 		goto release;
 	}
@@ -4965,6 +4968,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 		entry = pte_mkuffd_wp(entry);
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
 
+	/* At least one PTE was mapped to the zero page */
+	if (nr_pages > 1 && any_zeropfn)
+		flush_tlb_range(vma, addr, addr + (nr_pages * PAGE_SIZE));
+
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
 unlock:
-- 
2.45.2
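
For reference, pte_range_none_or_zeropfn() itself is introduced elsewhere
in this series and is not part of this diff. Below is a minimal sketch of
what such a helper could look like, modeled on the existing
pte_range_none() loop in mm/memory.c; the exact name, signature, and body
here are assumptions for illustration, not the series' actual code:

/*
 * Hypothetical sketch only -- the real helper may differ. Returns true
 * if every PTE in [pte, pte + nr_pages) is either pte_none() or maps
 * the shared zeropage; *any_zeropfn (if non-NULL) is set when at least
 * one zeropage mapping is seen.
 */
static inline bool pte_range_none_or_zeropfn(pte_t *pte, int nr_pages,
					     bool *any_zeropfn)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		pte_t ptent = ptep_get(pte + i);

		if (pte_none(ptent))
			continue;
		/*
		 * Check pte_present() before pte_pfn(): the PFN is only
		 * meaningful for present PTEs (not swap/migration entries).
		 */
		if (pte_present(ptent) && is_zero_pfn(pte_pfn(ptent))) {
			if (any_zeropfn)
				*any_zeropfn = true;
			continue;
		}
		return false;
	}
	return true;
}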
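On the design choice: the added flush_tlb_range() is what distinguishes
this path from the pure pte_none case. Zeropage PTEs were present, so
stale read-only translations may still sit in the TLB; they must be
flushed before the new mTHP mappings take effect. The pre-existing "No
need to invalidate - it was non-present before" comment continues to
hold only for ranges that contained no zeropage mappings.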