[PATCH v6 2/2] KVM: Using workqueue for WBINVD

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Compared to smp_call_function(), using a workqueue would buy us the ability to schedule while waiting for the WBINVD to complete.

Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
---

But I am not sure it is worth the complexity. In any case, WBINVD itself can't be
interrupted, so the benefit should come to the caller CPU, I think. And that
would extend the waiting time for the caller CPU, since we still need to wait
for the other CPUs to complete their work.

 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/x86.c              |   35 ++++++++++++++++++++++++++++-------
 2 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2bda624..6e0b793 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -16,6 +16,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/workqueue.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eea75f5..13f7c88 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -153,6 +153,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
+static struct workqueue_struct *wbinvd_wq;
+static DEFINE_PER_CPU(struct work_struct, wbinvd_work);
+
 static inline u32 bit(int bitno)
 {
 	return 1 << (bitno & 31);
@@ -1783,7 +1786,7 @@ out:
 	return r;
 }
 
-static void wbinvd_ipi(void *garbage)
+static void wbinvd_do_work(struct work_struct *work)
 {
 	wbinvd();
 }
@@ -1800,9 +1803,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (need_emulate_wbinvd(vcpu)) {
 		if (kvm_x86_ops->has_wbinvd_exit())
 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
-			smp_call_function_single(vcpu->cpu,
-					wbinvd_ipi, NULL, 1);
+		else if (vcpu->cpu != -1 && vcpu->cpu != cpu) {
+			queue_work_on(vcpu->cpu, wbinvd_wq,
+					&per_cpu(wbinvd_work, vcpu->cpu));
+			flush_workqueue(wbinvd_wq);
+		}
 	}
 
 	kvm_x86_ops->vcpu_load(vcpu, cpu);
@@ -3672,12 +3677,16 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
+	int cpu;
+
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
-		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
-				wbinvd_ipi, NULL, 1);
+		for_each_cpu(cpu, vcpu->arch.wbinvd_dirty_mask)
+			queue_work_on(cpu, wbinvd_wq,
+					&per_cpu(wbinvd_work, cpu));
+		flush_workqueue(wbinvd_wq);
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
@@ -4179,7 +4188,7 @@ EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
 
 int kvm_arch_init(void *opaque)
 {
-	int r;
+	int r, cpu;
 	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
 
 	if (kvm_x86_ops) {
@@ -4218,6 +4227,16 @@ int kvm_arch_init(void *opaque)
 	if (cpu_has_xsave)
 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 
+	for_each_possible_cpu(cpu) {
+		INIT_WORK(&per_cpu(wbinvd_work, cpu), wbinvd_do_work);
+	}
+
+	wbinvd_wq = create_workqueue("kvm_wbinvd");
+	if (!wbinvd_wq) {
+		r = -ENOMEM;
+		goto out;
+	}
+
 	return 0;
 
 out:
@@ -4226,6 +4245,8 @@ out:
 
 void kvm_arch_exit(void)
 {
+	destroy_workqueue(wbinvd_wq);
+
 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-- 
1.7.0.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux