From: Nadav Amit <namit@xxxxxxxxxx>

Avoid open-coding mmu_gather: there is no apparent reason not to use
the existing mmu_gather interfaces.

Use the newly introduced pte_may_need_flush() to check whether a
flush is needed, in order to avoid unnecessary flushes.

Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Nick Piggin <npiggin@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
 mm/mapping_dirty_helpers.c | 37 +++++++++++--------------------------
 1 file changed, 11 insertions(+), 26 deletions(-)

diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index b59054ef2e10..2ce6cf431026 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -4,7 +4,7 @@
 #include <linux/bitops.h>
 #include <linux/mmu_notifier.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
 /**
  * struct wp_walk - Private struct for pagetable walk callbacks
@@ -15,8 +15,7 @@
  */
 struct wp_walk {
 	struct mmu_notifier_range range;
-	unsigned long tlbflush_start;
-	unsigned long tlbflush_end;
+	struct mmu_gather tlb;
 	unsigned long total;
 };
 
@@ -42,9 +41,9 @@ static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
 		ptent = pte_wrprotect(old_pte);
 		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
 		wpwalk->total++;
-		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
-		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
-					   addr + PAGE_SIZE);
+
+		if (pte_may_need_flush(old_pte, ptent))
+			tlb_flush_pte_range(&wpwalk->tlb, addr, PAGE_SIZE);
 	}
 
 	return 0;
@@ -101,9 +100,7 @@ static int clean_record_pte(pte_t *pte, unsigned long addr,
 		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
 
 		wpwalk->total++;
-		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
-		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
-					   addr + PAGE_SIZE);
+		tlb_flush_pte_range(&wpwalk->tlb, addr, PAGE_SIZE);
 
 		__set_bit(pgoff, cwalk->bitmap);
 		cwalk->start = min(cwalk->start, pgoff);
@@ -184,20 +181,13 @@ static int wp_clean_pre_vma(unsigned long start, unsigned long end,
 {
 	struct wp_walk *wpwalk = walk->private;
 
-	wpwalk->tlbflush_start = end;
-	wpwalk->tlbflush_end = start;
-
 	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
 				walk->vma, walk->mm, start, end);
 	mmu_notifier_invalidate_range_start(&wpwalk->range);
 	flush_cache_range(walk->vma, start, end);
 
-	/*
-	 * We're not using tlb_gather_mmu() since typically
-	 * only a small subrange of PTEs are affected, whereas
-	 * tlb_gather_mmu() records the full range.
-	 */
-	inc_tlb_flush_pending(walk->mm);
+	tlb_gather_mmu(&wpwalk->tlb, walk->mm);
+	tlb_start_vma(&wpwalk->tlb, walk->vma);
 
 	return 0;
 }
@@ -212,15 +202,10 @@ static void wp_clean_post_vma(struct mm_walk *walk)
 {
 	struct wp_walk *wpwalk = walk->private;
 
-	if (mm_tlb_flush_nested(walk->mm))
-		flush_tlb_range(walk->vma, wpwalk->range.start,
-				wpwalk->range.end);
-	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
-		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
-				wpwalk->tlbflush_end);
-
 	mmu_notifier_invalidate_range_end(&wpwalk->range);
-	dec_tlb_flush_pending(walk->mm);
+
+	tlb_end_vma(&wpwalk->tlb, walk->vma);
+	tlb_finish_mmu(&wpwalk->tlb);
 }
 
 /*
-- 
2.25.1
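
For reference, the call sequence the patch moves to is the standard
mmu_gather lifecycle. Below is a minimal sketch of that pattern,
assuming the current two-argument tlb_gather_mmu() API; the caller
example_wrprotect_walk() and its parameters are hypothetical and only
illustrate the sequence, they are not part of the patch.

#include <linux/mm.h>
#include <asm/tlb.h>

/*
 * Hypothetical caller illustrating the mmu_gather lifecycle adopted
 * by this patch; only the call sequence mirrors the real code.
 */
static void example_wrprotect_walk(struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct mmu_gather tlb;

	/*
	 * tlb_gather_mmu() also increments mm->tlb_flush_pending,
	 * replacing the open-coded inc_tlb_flush_pending() /
	 * dec_tlb_flush_pending() pair the patch removes.
	 */
	tlb_gather_mmu(&tlb, vma->vm_mm);
	tlb_start_vma(&tlb, vma);

	/*
	 * For each modified PTE, record only the subrange actually
	 * touched; PTE changes that need no flush (as decided by
	 * pte_may_need_flush() in wp_pte()) are simply not recorded.
	 */
	tlb_flush_pte_range(&tlb, addr, PAGE_SIZE);

	tlb_end_vma(&tlb, vma);		/* may flush the gathered range */
	tlb_finish_mmu(&tlb);		/* flush leftovers, drop "pending" */
}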