Modify kvm_mmu_invalidate_zap_pages_in_memslot(), a.k.a. the x86 MMU's
handler for kvm_arch_flush_shadow_memslot(), to zap only the pages/PTEs
that actually belong to the memslot being removed.  This improves
performance, especially when the deleted memslot has only a few shadow
entries, or even no entries.  E.g. a microbenchmark that accesses regular
memory while concurrently reading PCI ROM to trigger memslot deletion
showed a 5% improvement in throughput.

Cc: Xiao Guangrong <guangrong.xiao@xxxxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0f43458f3782..bb56beb166e4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5612,7 +5612,38 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	kvm_mmu_invalidate_zap_all_pages(kvm);
+	struct kvm_mmu_page *sp;
+	LIST_HEAD(invalid_list);
+	unsigned long i;
+	bool flush;
+	gfn_t gfn;
+
+	spin_lock(&kvm->mmu_lock);
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		goto out_unlock;
+
+	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
+
+	for (i = 0; i < slot->npages; i++) {
+		gfn = slot->base_gfn + i;
+
+		for_each_valid_sp(kvm, sp, gfn) {
+			if (sp->gfn != gfn)
+				continue;
+
+			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		}
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+			flush = false;
+			cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
--
2.19.2
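
For readers approaching this from the KVM API side: the path exercised
here is driven from userspace via KVM_SET_USER_MEMORY_REGION, where a
memory_size of zero deletes an existing memslot; deletion is what ends
up calling kvm_arch_flush_shadow_memslot() and thus the handler modified
above.  Below is a minimal illustrative sketch of triggering a memslot
deletion (error handling omitted; the slot number and guest physical
address are arbitrary).  It is not the microbenchmark referenced in the
commit message.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

		void *mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		struct kvm_userspace_memory_region region = {
			.slot = 1,
			.guest_phys_addr = 0x100000,
			.memory_size = 0x1000,
			.userspace_addr = (unsigned long)mem,
		};

		/* Create the memslot. */
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		/*
		 * Delete it: memory_size == 0 removes the slot, which is
		 * what invokes kvm_arch_flush_shadow_memslot() and, on
		 * x86, kvm_mmu_invalidate_zap_pages_in_memslot().
		 */
		region.memory_size = 0;
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		return 0;
	}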