Let's make update_mmu_tlb() simply a generic wrapper around
update_mmu_tlb_range(). Only the latter can now be overridden by the
architecture. We can now remove __HAVE_ARCH_UPDATE_MMU_TLB as well.

Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Bang Li <libang.li@xxxxxxxxxxxx>
---
 arch/loongarch/include/asm/pgtable.h | 2 --
 arch/mips/include/asm/pgtable.h      | 2 --
 arch/riscv/include/asm/pgtable.h     | 2 --
 arch/xtensa/include/asm/pgtable.h    | 3 ---
 arch/xtensa/mm/tlb.c                 | 6 ------
 include/linux/pgtable.h              | 4 +---
 6 files changed, 1 insertion(+), 18 deletions(-)

diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 5ccc2a3a6f7a..161dd6e10479 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -467,8 +467,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 #define update_mmu_cache(vma, addr, ptep) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
 #define update_mmu_tlb_range(vma, addr, ptep, nr) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
 
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 0891ad7d43b6..c29a551eb0ca 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -594,8 +594,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 #define update_mmu_cache(vma, address, ptep) \
 	update_mmu_cache_range(NULL, vma, address, ptep, 1)
 
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
 #define update_mmu_tlb_range(vma, address, ptep, nr) \
 	update_mmu_cache_range(NULL, vma, address, ptep, nr)
 
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 4630815b0253..ddfd6f8cdcb3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -489,8 +489,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 #define update_mmu_cache(vma, addr, ptep) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
 #define update_mmu_tlb_range(vma, addr, ptep, nr) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
 
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 436158bd9030..1647a7cc3fbf 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -410,9 +410,6 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 
 typedef pte_t *pte_addr_t;
 
-void update_mmu_tlb(struct vm_area_struct *vma,
-		unsigned long address, pte_t *ptep);
-#define __HAVE_ARCH_UPDATE_MMU_TLB
 void update_mmu_tlb_range(struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep, unsigned int nr);
 #define update_mmu_tlb_range update_mmu_tlb_range
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 05efba86b870..0a1a815dc796 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -163,12 +163,6 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void update_mmu_tlb(struct vm_area_struct *vma,
-		    unsigned long address, pte_t *ptep)
-{
-	local_flush_tlb_page(vma, address);
-}
-
 void update_mmu_tlb_range(struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep, unsigned int nr)
 {
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 17d1caee39ab..117b807e3f89 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -736,13 +736,11 @@ static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
 }
 #endif
 
-#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
 static inline void update_mmu_tlb(struct vm_area_struct *vma,
 				unsigned long address, pte_t *ptep)
 {
+	update_mmu_tlb_range(vma, address, ptep, 1);
 }
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#endif
 
 /*
  * Some architectures may be able to avoid expensive synchronization
-- 
2.19.1.6.gb485710b
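
For readers outside the kernel tree, the shape this patch leaves behind can be
exercised in a small standalone C sketch: update_mmu_tlb() is always a
single-entry wrapper, and only update_mmu_tlb_range() remains overridable (an
architecture defines the macro to its own implementation, as xtensa does).
The struct, typedef, and printf below are stand-in stubs for illustration, not
the real kernel definitions.

/*
 * Standalone sketch of the wrapper/override pattern after this change.
 * Only update_mmu_tlb_range() can be overridden per architecture;
 * update_mmu_tlb() always forwards to it with nr == 1.
 */
#include <stdio.h>

struct vm_area_struct { int dummy; };	/* stub, not the real struct */
typedef unsigned long pte_t;		/* stub, not the real pte_t */

#ifndef update_mmu_tlb_range
/* Generic fallback, used when no architecture override is defined. */
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	printf("generic update_mmu_tlb_range: address=%#lx nr=%u\n",
	       address, nr);
}
#endif

/* Always a wrapper now; no __HAVE_ARCH_UPDATE_MMU_TLB escape hatch. */
static inline void update_mmu_tlb(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	update_mmu_tlb_range(vma, address, ptep, 1);
}

int main(void)
{
	struct vm_area_struct vma = { 0 };
	pte_t pte = 0;

	update_mmu_tlb(&vma, 0x1000, &pte);
	return 0;
}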