[PATCH v2] KVM: Avoid zapping unrelated shadows in __kvm_set_memory_region()

From: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>

We do not need to zap all shadow pages of the guest when we create or
destroy a slot in this function.

To achieve this, kvm_mmu_zap_all()/kvm_arch_flush_shadow() are changed to
take a slot id and zap only those pages which have mappings into the given
slot.
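
As an illustration of the idea (a minimal user-space toy, not part of the
patch; struct toy_sp, zap_slot() and the bitmap layout are invented for the
example): each shadow page records in a bitmap which slots it maps into,
the zap skips pages whose bit for the target slot is clear, and slot == -1
keeps the old zap-everything behaviour.

#include <stdio.h>

/* Toy stand-in for struct kvm_mmu_page: slot_bitmap bit n set means
 * the page has mappings into memslot n (invented for illustration). */
struct toy_sp {
	int id;
	unsigned long slot_bitmap;
};

/* Zap pages mapping @slot; slot == -1 means zap everything. */
static void zap_slot(struct toy_sp *pages, int n, int slot)
{
	for (int i = 0; i < n; i++) {
		if (slot >= 0 && !(pages[i].slot_bitmap & (1UL << slot)))
			continue;	/* unrelated page, leave it alone */
		printf("zapping sp %d\n", pages[i].id);
	}
}

int main(void)
{
	struct toy_sp pages[] = {
		{ 0, 1UL << 0 },		/* maps slot 0 only */
		{ 1, (1UL << 0) | (1UL << 3) },	/* maps slots 0 and 3 */
		{ 2, 1UL << 3 },		/* maps slot 3 only */
	};

	zap_slot(pages, 3, 3);	/* zaps sp 1 and sp 2 only */
	return 0;
}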

The way we iterate through the active shadow pages is also changed, so that
unrelated pages are no longer re-checked after every restart of the loop.
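
The difference is easiest to see with a counting toy (user-space, invented
for illustration, not kernel code): the old pattern restarts from the list
head after every single zap and so revisits pages it has already rejected,
while the new pattern finishes a full pass and restarts only if it zapped
something.

#include <stdbool.h>
#include <stdio.h>

#define N 8

/* Which of the N "shadow pages" map the target slot. */
static const bool matches[N] = {
	false, false, true, false, true, false, false, true
};

/* Old pattern: goto restart after every single zap. */
static int old_style(void)
{
	bool live[N];
	int visits = 0;

	for (int i = 0; i < N; i++)
		live[i] = true;
restart:
	for (int i = 0; i < N; i++) {
		if (!live[i])
			continue;
		visits++;
		if (matches[i]) {
			live[i] = false;
			goto restart;	/* re-checks pages 0..i-1 again */
		}
	}
	return visits;
}

/* New pattern: full pass first, restart only if something was zapped. */
static int new_style(void)
{
	bool live[N];
	int visits = 0, zapped;

	for (int i = 0; i < N; i++)
		live[i] = true;
restart:
	zapped = 0;
	for (int i = 0; i < N; i++) {
		if (!live[i])
			continue;
		visits++;
		if (matches[i]) {
			live[i] = false;
			zapped = 1;
		}
	}
	if (zapped)
		goto restart;	/* at most one confirming pass in this toy */
	return visits;
}

int main(void)
{
	/* Prints "old: 18 visits, new: 13 visits" for this layout. */
	printf("old: %d visits, new: %d visits\n", old_style(), new_style());
	return 0;
}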

In addition, the condition used to decide whether there are any mmio sptes
to clear is changed so that no flush is done for newly created slots.
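
Concretely (a user-space toy; old_cond()/new_cond() are invented names for
the two if-conditions in the last hunk of the diff): a newly created slot
has old.npages == 0, so the new condition skips the flush, while a slot
moved to a different base gfn still triggers it.

#include <stdbool.h>
#include <stdio.h>

/* Old check: npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT.
 * Fires even for a freshly created slot (its old base gfn is simply 0). */
static bool old_cond(unsigned long npages, unsigned long base_gfn,
		     unsigned long old_base_gfn)
{
	return npages && old_base_gfn != base_gfn;
}

/* New check: the slot must have existed before (old.npages != 0). */
static bool new_cond(unsigned long npages, unsigned long base_gfn,
		     unsigned long old_npages, unsigned long old_base_gfn)
{
	return npages && old_npages && base_gfn != old_base_gfn;
}

int main(void)
{
	/* Newly created slot at gfn 0x100: no mmio sptes to clear yet. */
	printf("create: old=%d new=%d\n",
	       old_cond(16, 0x100, 0), new_cond(16, 0x100, 0, 0));
	/* Existing slot moved from gfn 0x100 to 0x200: flush needed. */
	printf("move  : old=%d new=%d\n",
	       old_cond(16, 0x200, 0x100), new_cond(16, 0x200, 16, 0x100));
	return 0;
}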

With all these changes applied, the total time needed to flush the shadow
pages of a typical Linux guest, running Fedora with 4GB memory, during
shutdown was reduced from 90ms to 60ms.

Furthermore, the total number of flushes needed to boot and shut down that
guest was reduced from 52 to 31.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
Cc: Takuya Yoshikawa <takuya.yoshikawa@xxxxxxxxx>
---
 [ Added cc to my gmail account because my address may change (only) a bit
   in a few months. ]

 rebased against next-candidates

 arch/ia64/kvm/kvm-ia64.c        |    2 +-
 arch/powerpc/kvm/powerpc.c      |    2 +-
 arch/s390/kvm/kvm-s390.c        |    2 +-
 arch/x86/include/asm/kvm_host.h |    2 +-
 arch/x86/kvm/mmu.c              |   22 ++++++++++++++++++----
 arch/x86/kvm/x86.c              |   13 ++++++++++---
 include/linux/kvm_host.h        |    2 +-
 virt/kvm/kvm_main.c             |   15 ++++++---------
 8 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 9d80ff8..360abe5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1626,7 +1626,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow(struct kvm *kvm, int slot)
 {
 	kvm_flush_remote_tlbs(kvm);
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 58ad860..5680337 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -319,7 +319,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 }
 
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow(struct kvm *kvm, int slot)
 {
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d30c835..8c25606 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -879,7 +879,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow(struct kvm *kvm, int slot)
 {
 }
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f624ca7..422f23a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -715,7 +715,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask);
-void kvm_mmu_zap_all(struct kvm *kvm);
+void kvm_mmu_zap_all(struct kvm *kvm, int slot);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 29ad6f9..a50f7ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3930,16 +3930,30 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_zap_all(struct kvm *kvm)
+/**
+ * kvm_mmu_zap_all - zap all shadows which have mappings into a given slot
+ * @kvm: the kvm instance
+ * @slot: id of the target slot
+ *
+ * If @slot is -1, zap all shadow pages.
+ */
+void kvm_mmu_zap_all(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
+	int zapped;
 
 	spin_lock(&kvm->mmu_lock);
 restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
+	zapped = 0;
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+		if ((slot >= 0) && !test_bit(slot, sp->slot_bitmap))
+			continue;
+
+		zapped |= kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+	}
+	if (zapped)
+		goto restart;
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d9a578..eac378c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5038,7 +5038,7 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 	 * to ensure that the updated hypercall appears atomically across all
 	 * VCPUs.
 	 */
-	kvm_mmu_zap_all(vcpu->kvm);
+	kvm_mmu_zap_all(vcpu->kvm, -1);
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
@@ -6376,9 +6376,16 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+/**
+ * kvm_arch_flush_shadow - flush shadows which have mappings into a given slot
+ * @kvm: the kvm instance
+ * @slot: id of the target slot
+ *
+ * If @slot is -1, flush all shadow pages.
+ */
+void kvm_arch_flush_shadow(struct kvm *kvm, int slot)
 {
-	kvm_mmu_zap_all(kvm);
+	kvm_mmu_zap_all(kvm, slot);
 	kvm_reload_remote_mmus(kvm);
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 49c2f2f..37ebb10 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -385,7 +385,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
-void kvm_arch_flush_shadow(struct kvm *kvm);
+void kvm_arch_flush_shadow(struct kvm *kvm, int slot);
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6bd34a6..54d71c4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -412,7 +412,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow(kvm);
+	kvm_arch_flush_shadow(kvm, -1);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -574,7 +574,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow(kvm);
+	kvm_arch_flush_shadow(kvm, -1);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_free_physmem(kvm);
@@ -796,7 +796,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
 		 * 	- kvm_is_visible_gfn (mmu_check_roots)
 		 */
-		kvm_arch_flush_shadow(kvm);
+		kvm_arch_flush_shadow(kvm, mem->slot);
 		kfree(old_memslots);
 	}
 
@@ -831,12 +831,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
-	/*
-	 * If the new memory slot is created, we need to clear all
-	 * mmio sptes.
-	 */
-	if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
-		kvm_arch_flush_shadow(kvm);
+	/* Need to clear all mmio sptes used before. */
+	if (npages && old.npages && base_gfn != old.base_gfn)
+		kvm_arch_flush_shadow(kvm, mem->slot);
 
 	kvm_free_physmem_slot(&old, &new);
 	kfree(old_memslots);
-- 
1.7.5.4
