Do arch-specific range-based TLB flushes (if they're supported) when
flushing in response to mmu_notifier events, as a single range-based
flush is almost always more performant. This is especially true in the
case of mmu_notifier events, as the majority of events that hit a
running VM operate on a relatively small range of memory.

Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
This is *very* lightly tested, a thumbs up from the ARM world would be
much appreciated.

 virt/kvm/kvm_main.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d0788d0a72cc..46bb95d58d53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -599,6 +599,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	struct kvm_gfn_range gfn_range;
 	struct kvm_memory_slot *slot;
 	struct kvm_memslots *slots;
+	bool need_flush = false;
 	int i, idx;
 
 	if (WARN_ON_ONCE(range->end <= range->start))
@@ -651,10 +652,22 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 					goto mmu_unlock;
 			}
 			r.ret |= range->handler(kvm, &gfn_range);
+
+			/*
+			 * Use a precise gfn-based TLB flush when possible, as
+			 * most mmu_notifier events affect a small-ish range.
+			 * Fall back to a full TLB flush if the gfn-based flush
+			 * fails, and don't bother trying the gfn-based flush
+			 * if a full flush is already pending.
+			 */
+			if (range->flush_on_ret && !need_flush && r.ret &&
+			    kvm_arch_flush_remote_tlbs_range(kvm, gfn_range.start,
+							     gfn_range.end - gfn_range.start))
+				need_flush = true;
 		}
 	}
 
-	if (range->flush_on_ret && r.ret)
+	if (need_flush)
 		kvm_flush_remote_tlbs(kvm);
 
 mmu_unlock:

base-commit: 332d2c1d713e232e163386c35a3ba0c1b90df83f
-- 
2.46.0.rc2.264.g509ed76dc8-goog
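
For readers skimming the diff, a rough, self-contained sketch of the
fallback pattern the change implements follows: attempt a precise
range-based remote TLB flush for each range the handler touched, and
issue a single full flush only if a range flush fails or is unsupported.
All names below (struct vm, try_range_flush(), full_flush(),
handle_ranges()) are hypothetical stand-ins for illustration, not KVM
APIs, and the sketch omits details such as the handler's return value
(r.ret) gating the flush.

/* Standalone illustration only; compile with any C99 compiler. */
#include <stdbool.h>
#include <stdio.h>

struct vm { const char *name; };

/* Stand-in for an arch hook: returns 0 on success, non-zero if a
 * range-based flush is unsupported or fails. */
static int try_range_flush(struct vm *vm, unsigned long start,
			   unsigned long nr_pages)
{
	printf("%s: range flush [%#lx, +%lu pages)\n", vm->name, start, nr_pages);
	return 0;	/* flip to non-zero to exercise the fallback path */
}

static void full_flush(struct vm *vm)
{
	printf("%s: full TLB flush\n", vm->name);
}

static void handle_ranges(struct vm *vm, const unsigned long (*ranges)[2],
			  int nr, bool flush_on_ret)
{
	bool need_full_flush = false;
	int i;

	for (i = 0; i < nr; i++) {
		/* <per-range handler work would go here> */

		/*
		 * Skip the precise flush once a full flush is already
		 * pending; it would be redundant work.  Degrade to a full
		 * flush the first time a range flush fails.
		 */
		if (flush_on_ret && !need_full_flush &&
		    try_range_flush(vm, ranges[i][0], ranges[i][1]))
			need_full_flush = true;
	}

	if (need_full_flush)
		full_flush(vm);
}

int main(void)
{
	struct vm vm = { .name = "demo-vm" };
	const unsigned long ranges[][2] = { { 0x1000, 4 }, { 0x8000, 16 } };

	handle_ranges(&vm, ranges, 2, true);
	return 0;
}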