Since commit 19eaf44954df ("mm: thp: support allocation of anonymous multi-size THP"), we may need to batch-update the TLB for an address range through repeated calls to the update_mmu_tlb function. We can simplify this operation by adding an update_mmu_tlb_range function, which may also avoid executing some unnecessary code on some architectures. Signed-off-by: Bang Li <libang.li@xxxxxxxxxxxx> --- include/linux/pgtable.h | 8 ++++++++ mm/memory.c | 4 +--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 18019f037bae..869bfe6054f1 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -737,6 +737,14 @@ static inline void update_mmu_tlb(struct vm_area_struct *vma, #define __HAVE_ARCH_UPDATE_MMU_TLB #endif +#ifndef __HAVE_ARCH_UPDATE_MMU_TLB_RANGE +static inline void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) +{ +} +#define __HAVE_ARCH_UPDATE_MMU_TLB_RANGE +#endif + /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already diff --git a/mm/memory.c b/mm/memory.c index eea6e4984eae..2d53e29cf76e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4421,7 +4421,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vm_fault_t ret = 0; int nr_pages = 1; pte_t entry; - int i; /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) @@ -4491,8 +4490,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) update_mmu_tlb(vma, addr, vmf->pte); goto release; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { - for (i = 0; i < nr_pages; i++) - update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i); + update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); goto release; } -- 2.19.1.6.gb485710b