After commit 19eaf44954df ("mm: thp: support allocation of anonymous
multi-size THP"), we may need to update the TLB for a range of addresses
by calling update_mmu_tlb() once per PTE. We can simplify this by adding
an update_mmu_tlb_range() function, which may also avoid executing some
unnecessary per-PTE code on some architectures.

Signed-off-by: Bang Li <libang.li@xxxxxxxxxxxx>
---
 include/linux/pgtable.h | 5 +++++
 mm/memory.c             | 4 +---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 18019f037bae..73411dfebf7a 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -734,6 +734,11 @@ static inline void update_mmu_tlb(struct vm_area_struct *vma,
 				unsigned long address, pte_t *ptep)
 {
 }
+
+static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
+				unsigned long address, pte_t *ptep, unsigned int nr)
+{
+}
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #endif
 
diff --git a/mm/memory.c b/mm/memory.c
index 6647685fd3c4..1f0ca362b82a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4396,7 +4396,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	int nr_pages = 1;
 	pte_t entry;
-	int i;
 
 	/* File mapping without ->vm_ops ? */
 	if (vma->vm_flags & VM_SHARED)
@@ -4465,8 +4464,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 			update_mmu_tlb(vma, addr, vmf->pte);
 			goto release;
 		} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
-			for (i = 0; i < nr_pages; i++)
-				update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
+			update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
 			goto release;
 		}
 
-- 
2.19.1.6.gb485710b
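
For context, the intended benefit on architectures with a non-empty
update_mmu_tlb() is that the whole range can be handled in one operation
instead of nr per-PTE calls. A minimal sketch, not part of this patch,
assuming a hypothetical architecture whose per-PTE hook amounts to a TLB
flush (the choice of flush_tlb_range() here is illustrative only):

/*
 * Illustrative sketch only: an architecture that defines
 * __HAVE_ARCH_UPDATE_MMU_TLB could provide a batched variant that issues
 * a single ranged flush rather than nr individual per-PTE flushes.
 * Whether flush_tlb_range() is the right primitive is arch-specific and
 * assumed here purely for illustration.
 */
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);
}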