The patch titled
     Subject: mm/mremap: use range flush that does TLB and page walk cache flush
has been removed from the -mm tree.  Its filename was
     mm-mremap-use-range-flush-that-does-tlb-and-page-walk-cache-flush.patch

This patch was dropped because an updated version will be merged

------------------------------------------------------
From: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx>
Subject: mm/mremap: use range flush that does TLB and page walk cache flush

Some architectures have the concept of a page walk cache, which needs to
be flushed when higher levels of the page tables are updated.  A fast
mremap that moves page table pages instead of copying pte entries should
therefore flush the page walk cache, since the old translation cache
entries are no longer valid.

Add a new helper, flush_pte_tlb_pwc_range(), which invalidates both the
TLB and the page walk cache for a range whose TLB entries are mapped with
page size PAGE_SIZE.

Link: https://lkml.kernel.org/r/20210422054323.150993-7-aneesh.kumar@xxxxxxxxxxxxx
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
Cc: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/powerpc/include/asm/book3s/64/tlbflush.h |   10 ++++++++++
 mm/mremap.c                                   |   14 ++++++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)

--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h~mm-mremap-use-range-flush-that-does-tlb-and-page-walk-cache-flush
+++ a/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -80,6 +80,16 @@ static inline void flush_hugetlb_tlb_ran
 	return flush_hugetlb_tlb_pwc_range(vma, start, end, false);
 }
 
+#define flush_pte_tlb_pwc_range flush_tlb_pwc_range
+static inline void flush_pte_tlb_pwc_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_tlb_pwc_range_psize(vma->vm_mm, start,
+							end, mmu_virtual_psize, true);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
--- a/mm/mremap.c~mm-mremap-use-range-flush-that-does-tlb-and-page-walk-cache-flush
+++ a/mm/mremap.c
@@ -210,6 +210,16 @@ static void move_ptes(struct vm_area_str
 		drop_rmap_locks(vma);
 }
 
+#ifndef flush_pte_tlb_pwc_range
+#define flush_pte_tlb_pwc_range flush_pte_tlb_pwc_range
+static inline void flush_pte_tlb_pwc_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	return flush_tlb_range(vma, start, end);
+}
+#endif
+
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
@@ -260,7 +270,7 @@ static bool move_normal_pmd(struct vm_ar
 	VM_BUG_ON(!pmd_none(*new_pmd));
 
 	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
-	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+	flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	spin_unlock(old_ptl);
@@ -307,7 +317,7 @@ static bool move_normal_pud(struct vm_ar
 	VM_BUG_ON(!pud_none(*new_pud));
 
 	pud_populate(mm, new_pud, (pmd_t *)pud_page_vaddr(pud));
-	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
+	flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PUD_SIZE);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	spin_unlock(old_ptl);
_
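
[Editor's note] The #ifndef fallback added to mm/mremap.c above only takes
effect when an architecture does not define flush_pte_tlb_pwc_range itself.
As an illustration only, a different architecture with a page walk cache
could hook in along the following lines; the file path and
arch_flush_pwc_range() are hypothetical placeholders and are not part of
this patch or of any real architecture's API:

/* hypothetical arch/<arch>/include/asm/tlbflush.h fragment */
#define flush_pte_tlb_pwc_range flush_pte_tlb_pwc_range
static inline void flush_pte_tlb_pwc_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	/* Invalidate the TLB entries covering the moved range ... */
	flush_tlb_range(vma, start, end);
	/*
	 * ... and also invalidate the page walk cache, because fast mremap
	 * moved the page table page itself rather than copying individual
	 * PTEs.  arch_flush_pwc_range() stands in for whatever primitive
	 * the architecture actually provides.
	 */
	arch_flush_pwc_range(vma->vm_mm, start, end);
}

Defining the macro before including the generic code is what makes the
#ifndef block in mm/mremap.c compile out, so the arch override is used at
the two call sites in move_normal_pmd()/move_normal_pud().
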
Patches currently in -mm which might be from aneesh.kumar@xxxxxxxxxxxxx are

mm-mremap-move-tlb-flush-outside-page-table-lock.patch
mm-mremap-allow-arch-runtime-override.patch
powerpc-mm-enable-move-pmd-pud.patch