The flush_tlb_all() function is not used a whole lot, but we might as
well use broadcast TLB flushing there, too.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxxx>
Tested-by: Manali Shukla <Manali.Shukla@xxxxxxx>
Tested-by: Brendan Jackman <jackmanb@xxxxxxxxxx>
Tested-by: Michael Kelley <mhklinux@xxxxxxxxxxx>
---
 arch/x86/mm/tlb.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index f44a03bca41c..a6cd61d5f423 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1064,7 +1064,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
-
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -1074,6 +1073,15 @@ static void do_flush_tlb_all(void *info)
 void flush_tlb_all(void)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+
+	/* First try (faster) hardware-assisted TLB invalidation. */
+	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+		guard(preempt)();
+		invlpgb_flush_all();
+		return;
+	}
+
+	/* Fall back to the IPI-based invalidation. */
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
-- 
2.47.1
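
For readers unfamiliar with the scope-based cleanup helpers: guard(preempt)()
comes from the kernel's <linux/cleanup.h> guard infrastructure and disables
preemption at the point of declaration, re-enabling it automatically when the
enclosing scope is left. The new branch is therefore roughly equivalent to the
open-coded sketch below; this is only an illustration, not part of the patch,
and the reason for keeping preemption off is presumably to keep the broadcast
invalidation and its completion on the same CPU.

	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
		preempt_disable();
		/* Broadcast the TLB invalidation to all CPUs via INVLPGB. */
		invlpgb_flush_all();
		preempt_enable();
		return;
	}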