Originally, the TLB flush is done by slot_handle_level_range(). This
patch flushes the TLB directly in kvm_zap_gfn_range() when a range
flush is available.

Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cadb6a0b5247..9ae5887c8d1c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5583,6 +5583,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	bool flush = false;
 	int i;
 
 	spin_lock(&kvm->mmu_lock);
@@ -5590,18 +5591,26 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(memslot, slots) {
 			gfn_t start, end;
+			bool flush_tlb = true;
 
 			start = max(gfn_start, memslot->base_gfn);
 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
 			if (start >= end)
 				continue;
 
-			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-						start, end - 1, true);
+			if (kvm_available_flush_tlb_with_range())
+				flush_tlb = false;
+
+			flush = slot_handle_level_range(kvm, memslot,
+					kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+					PT_MAX_HUGEPAGE_LEVEL, start,
+					end - 1, flush_tlb);
 		}
 	}
 
+	if (flush && kvm_available_flush_tlb_with_range())
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
 	spin_unlock(&kvm->mmu_lock);
 }
 
-- 
2.14.4
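
Note for readers following along outside the tree: the snippet below is a
standalone model of the flush decision this patch introduces, not kernel
code. range_flush_supported() and zap_slot() are hypothetical stand-ins for
kvm_available_flush_tlb_with_range() and slot_handle_level_range(); the
point is only that when a ranged flush is available, the per-slot flush is
suppressed and a single ranged flush is issued after the loop.

/* Standalone sketch modelling the flush decision in this patch.
 * range_flush_supported() and zap_slot() are hypothetical stand-ins;
 * they are NOT the KVM helpers used in the diff above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool range_flush_supported(void)
{
	return true;	/* assume the hypervisor exposes a ranged TLB flush */
}

/* Mirrors the return convention of slot_handle_level_range(): true when
 * something was zapped.  When @flush_in_callee is true the callee would
 * flush by itself (the pre-patch behaviour).
 */
static bool zap_slot(unsigned long start, unsigned long end,
		     bool flush_in_callee)
{
	printf("zap gfns [%lu, %lu), callee flush: %s\n",
	       start, end, flush_in_callee ? "yes" : "no");
	return true;
}

int main(void)
{
	bool flush = false;
	/* When a ranged flush exists, defer flushing to a single call. */
	bool flush_in_callee = !range_flush_supported();

	/* Track whether anything was zapped across the memslots. */
	flush |= zap_slot(0, 512, flush_in_callee);
	flush |= zap_slot(1024, 2048, flush_in_callee);

	if (flush && range_flush_supported())
		printf("one ranged TLB flush over the whole zapped range\n");

	return 0;
}

The intended gain is that a supporting hypervisor sees one ranged flush
covering the whole zapped range instead of one full remote TLB flush per
memslot iteration.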