[kvm-unit-tests PATCH v3 27/27] x86: ipi_stress: add optional SVM support

Allow some vCPUs to wait for the IPI from inside an SVM nested (L2)
guest, so that the test also exercises interrupt delivery to a nested
guest. Each vCPU randomly decides per iteration whether to wait in L1
or in L2.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
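Note for reviewers: only the final assert of the pre-existing
wait_for_ipi() is visible in the diff below, but the new L2 path calls
it directly. For context, here is a rough sketch of what that helper
does (a reconstruction from the surrounding code, not the
authoritative implementation):

	static void wait_for_ipi(struct cpu_test_state *state)
	{
		u64 old_count = state->isr_count;

		while (state->isr_count == old_count) {
			/*
			 * Open a one-instruction interrupt window: the sti
			 * interrupt shadow defers delivery until after the
			 * next instruction, which is sometimes a hlt
			 * (hlt_prob).
			 */
			if (random_decision(&state->random, hlt_prob * 100))
				asm volatile("sti; hlt; cli");
			else
				asm volatile("sti; nop; cli");
		}
		assert(state->isr_count == old_count + 1);
	}
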
 x86/ipi_stress.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 78 insertions(+), 1 deletion(-)
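
For reference, the svm_lib interfaces used below (signatures inferred
from their use in this patch; the real definitions come from earlier
patches in this series):

	bool setup_svm(void);                      /* host-side SVM enable */
	void svm_vcpu_init(struct svm_vcpu *vcpu); /* alloc vmcb + L2 stack */
	/* SVM_VMRUN(vcpu): enter L2; exit code in vcpu->vmcb->control */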

diff --git a/x86/ipi_stress.c b/x86/ipi_stress.c
index dea3e605..1a4c5510 100644
--- a/x86/ipi_stress.c
+++ b/x86/ipi_stress.c
@@ -12,10 +12,12 @@
 #include "types.h"
 #include "alloc_page.h"
 #include "vmalloc.h"
+#include "svm_lib.h"
 #include "random.h"
 
 u64 num_iterations = 100000;
 float hlt_prob = 0.1;
+bool use_svm;
 volatile bool end_test;
 
 #define APIC_TIMER_PERIOD (1000*1000*1000)
@@ -25,6 +27,7 @@ struct cpu_test_state {
 	u64 last_isr_count;
 	struct random_state random;
 	int smp_id;
+	struct svm_vcpu vcpu;
 } *cpu_states;
 
 
@@ -71,6 +74,62 @@ static void wait_for_ipi(struct cpu_test_state *state)
 	assert(state->isr_count == old_count + 1);
 }
 
+#ifdef __x86_64__
+static void l2_guest_wait_for_ipi(struct cpu_test_state *state)
+{
+	wait_for_ipi(state);
+	asm volatile("vmmcall");
+}
+
+static void l2_guest_dummy(void)
+{
+	while (true)
+		asm volatile("vmmcall");
+}
+
+static void wait_for_ipi_in_l2(struct cpu_test_state *state)
+{
+	u64 old_count = state->isr_count;
+	struct svm_vcpu *vcpu = &state->vcpu;
+	bool poll_in_the_guest;
+
+	/*
+	 * If poll_in_the_guest is true, the guest runs with interrupts
+	 * disabled, enabling them for one instruction at a time
+	 * (sometimes together with halting) until it receives an
+	 * interrupt.
+	 *
+	 * If poll_in_the_guest is false, the guest always runs with
+	 * interrupts enabled and will usually receive the interrupt
+	 * right away; if it doesn't, we run the guest again until
+	 * it does.
+	 */
+	poll_in_the_guest = random_decision(&state->random, 50);
+
+	vcpu->regs.rdi = (u64)state;
+	vcpu->regs.rsp = (ulong)vcpu->stack;
+
+	vcpu->vmcb->save.rip = poll_in_the_guest ?
+			(ulong)l2_guest_wait_for_ipi :
+			(ulong)l2_guest_dummy;
+
+	if (!poll_in_the_guest)
+		vcpu->vmcb->save.rflags |= X86_EFLAGS_IF;
+	else
+		vcpu->vmcb->save.rflags &= ~X86_EFLAGS_IF;
+
+	do {
+		asm volatile("clgi;sti");
+		SVM_VMRUN(vcpu);
+		asm volatile("cli;stgi");
+		assert(vcpu->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+
+		if (poll_in_the_guest)
+			assert(old_count < state->isr_count);
+
+	} while (old_count == state->isr_count);
+}
+#endif
 
 static void vcpu_init(void *)
 {
@@ -85,6 +144,11 @@ static void vcpu_init(void *)
 	state->random = get_prng();
 	state->isr_count = 0;
 	state->smp_id = smp_id();
+
+#ifdef __x86_64__
+	if (use_svm)
+		svm_vcpu_init(&state->vcpu);
+#endif
 }
 
 static void vcpu_code(void *)
@@ -111,7 +175,12 @@ static void vcpu_code(void *)
 			break;
 
 		// wait for the IPI interrupt chain to come back to us
-		wait_for_ipi(state);
+#ifdef __x86_64__
+		if (use_svm && random_decision(&state->random, 20))
+			wait_for_ipi_in_l2(state);
+		else
+#endif
+			wait_for_ipi(state);
 	}
 }
 
@@ -137,6 +206,14 @@ int main(int argc, void **argv)
 	setup_vm();
 	init_prng();
 
+#ifdef __x86_64__
+	if (this_cpu_has(X86_FEATURE_SVM)) {
+		use_svm = true;
+		if (!setup_svm())
+			use_svm = false;
+	}
+#endif
+
 	cpu_states = calloc(ncpus, sizeof(cpu_states[0]));
 
 	printf("found %d cpus\n", ncpus);
-- 
2.34.3