[PATCH kvm-unit-tests] KVM: x86: add hyperv clock test case

The test checks the consistency of the reference TSC page against the
time reference counter: it looks for time warps between the two clock
sources and measures the per-read overhead of the TSC page.

Reworked from the initial version by Paolo Bonzini.

Signed-off-by: Roman Kagan <rkagan@xxxxxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
The test is expected to fail until the Hyper-V reference TSC page
implementation lands in KVM.
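
As a rough sketch of what the guest-side readout computes (per the
Hyper-V TLFS; the __int128 arithmetic below is only illustrative, the
test itself uses the inline-asm helper mul64_shift64()):

	/* partition reference time, in 100ns ticks */
	u64 ref = (u64)(((unsigned __int128)rdtsc() * tsc_ref->tsc_scale) >> 64)
		  + tsc_ref->tsc_offset;

A sequence counter in the page guards readers against concurrent
updates; see tsc_ref_read() in the patch.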

 x86/Makefile.common |   2 +
 x86/Makefile.x86_64 |   1 +
 x86/hyperv.h        |   9 ++
 x86/hyperv_clock.c  | 230 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg   |   5 ++
 5 files changed, 247 insertions(+)
 create mode 100644 x86/hyperv_clock.c

diff --git a/x86/Makefile.common b/x86/Makefile.common
index 356d879..287c0cf 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -67,6 +67,8 @@ $(TEST_DIR)/hyperv_synic.elf: $(TEST_DIR)/hyperv.o
 
 $(TEST_DIR)/hyperv_stimer.elf: $(TEST_DIR)/hyperv.o
 
+$(TEST_DIR)/hyperv_clock.elf: $(TEST_DIR)/hyperv.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat $(TEST_DIR)/*.elf \
 	$(TEST_DIR)/.*.d lib/x86/.*.d
diff --git a/x86/Makefile.x86_64 b/x86/Makefile.x86_64
index e166911..af99279 100644
--- a/x86/Makefile.x86_64
+++ b/x86/Makefile.x86_64
@@ -14,6 +14,7 @@ tests = $(TEST_DIR)/access.flat $(TEST_DIR)/apic.flat \
 tests += $(TEST_DIR)/svm.flat
 tests += $(TEST_DIR)/vmx.flat
 tests += $(TEST_DIR)/tscdeadline_latency.flat
+tests += $(TEST_DIR)/hyperv_clock.flat
 
 include $(TEST_DIR)/Makefile.common
 
diff --git a/x86/hyperv.h b/x86/hyperv.h
index 434a933..bef0317 100644
--- a/x86/hyperv.h
+++ b/x86/hyperv.h
@@ -11,6 +11,7 @@
 #define HV_X64_MSR_SYNTIMER_AVAILABLE           (1 << 3)
 
 #define HV_X64_MSR_TIME_REF_COUNT               0x40000020
+#define HV_X64_MSR_REFERENCE_TSC                0x40000021
 
 /* Define synthetic interrupt controller model specific registers. */
 #define HV_X64_MSR_SCONTROL                     0x40000080
@@ -179,4 +180,12 @@ void synic_sint_create(int vcpu, int sint, int vec, bool auto_eoi);
 void synic_sint_set(int vcpu, int sint);
 void synic_sint_destroy(int vcpu, int sint);
 
+struct hv_reference_tsc_page {
+	uint32_t tsc_sequence;
+	uint32_t res1;
+	uint64_t tsc_scale;
+	int64_t tsc_offset;
+};
+
 #endif
diff --git a/x86/hyperv_clock.c b/x86/hyperv_clock.c
new file mode 100644
index 0000000..3cd6af7
--- /dev/null
+++ b/x86/hyperv_clock.c
@@ -0,0 +1,230 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "processor.h"
+#include "hyperv.h"
+#include "vm.h"
+#include "asm/barrier.h"
+
+#define MAX_CPU 64
+#define NSEC_PER_SEC 1000000000ULL
+#define HV_NSEC_PER_TICK 100
+#define TICKS_PER_SEC (NSEC_PER_SEC / HV_NSEC_PER_TICK)
+
+#define DURATION 2		/* testcase duration (s) */
+#define CPI_MAX 1000		/* max cycles per iteration */
+
+struct hv_reference_tsc_page *tsc_ref;
+
+struct warp_test_info {
+	unsigned long long warps;
+	unsigned long long stalls;
+	long long worst;
+};
+struct warp_test_info wti[MAX_CPU];
+
+struct perf_test_info {
+	unsigned long long cycles;
+	unsigned long long loops;
+};
+struct perf_test_info pti[MAX_CPU];
+
+atomic_t cpus_left;
+
+/*
+ * ret = (a * b) >> 64
+ * where ret, a, b are 64bit
+ */
+static inline u64 mul64_shift64(u64 a, u64 b)
+{
+	u64 product;
+
+	__asm__ (
+		"mul %[b]"
+		: "+a" (a), "=d" (product)
+		: [b] "rm" (b) );
+
+	return product;
+}
+
+static inline u64 rdtsc_ordered(void)
+{
+	/*
+	 * FIXME: on Intel CPUs rmb() aka lfence is sufficient which brings up
+	 * to 2x speedup
+	 */
+	mb();
+	return rdtsc();
+}
+
+static u64 tsc_ref_read_once(void)
+{
+	return mul64_shift64(rdtsc_ordered(), tsc_ref->tsc_scale) +
+		tsc_ref->tsc_offset;
+}
+
+static u64 time_msr_read(void)
+{
+	return rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+}
+
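+/*
+ * Seqlock-style reader: a sequence of 0 means the TSC page is invalid
+ * and the time reference counter MSR must be read instead; otherwise
+ * retry until the sequence is stable across the data fetch.
+ */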
+static u64 tsc_ref_read(void)
+{
+	u32 seq;
+	u64 ret;
+
+	do {
+		seq = tsc_ref->tsc_sequence;
+		if (!seq)
+			return time_msr_read();
+		smp_rmb();		/* fetch version before data */
+		ret = tsc_ref_read_once();
+		smp_rmb();		/* test version after fetching data */
+	} while (tsc_ref->tsc_sequence != seq);
+
+	return ret;
+}
+
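+/*
+ * Interleave TSC page reads with reference counter MSR reads taken at
+ * exponentially growing intervals, and check that time never goes
+ * backwards; count warps, stalls, and the worst backward jump seen.
+ */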
+static void warp_test_cpu(void *data)
+{
+	struct warp_test_info *ti = data;
+	u64 t = time_msr_read();
+	u64 end = t + DURATION * TICKS_PER_SEC;
+	u16 msr_interval = 1;
+	u64 msr_time = t + msr_interval;
+	ti->warps = 0;
+	ti->stalls = 0;
+	ti->worst = 0;
+
+	do {
+		u64 now;
+		s64 delta;
+
+		if (t >= msr_time) {
+			now = time_msr_read();
+
+			if (msr_interval >= (1U << 15))
+				msr_interval = 1;
+			else
+				msr_interval <<= 1;
+		} else {
+			now = tsc_ref_read();
+		}
+
+		delta = now - t;
+
+		if (delta < 0) {
+			ti->warps++;
+			if (delta < ti->worst)
+				ti->worst = delta;
+		}
+		if (delta == 0)
+			ti->stalls++;
+
+		t = now;
+	} while (t < end);
+
+	atomic_dec(&cpus_left);
+}
+
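+/*
+ * Spin on tsc_ref_read() for DURATION seconds and record how many TSC
+ * cycles an average iteration costs.
+ */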
+static void perf_test_cpu(void *data)
+{
+	struct perf_test_info *ti = data;
+	u64 end = tsc_ref_read() + DURATION * TICKS_PER_SEC;
+	ti->loops = 0;
+	ti->cycles = rdtsc();
+
+	do {
+		ti->loops++;
+	} while (tsc_ref_read() < end);
+
+	ti->cycles = rdtsc() - ti->cycles;
+
+	atomic_dec(&cpus_left);
+}
+
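+/*
+ * Wait up to DURATION seconds for the hypervisor to publish a non-zero
+ * sequence number in the TSC page.
+ */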
+static void presence_test(void)
+{
+	u32 seq;
+	u64 end = time_msr_read() + DURATION * TICKS_PER_SEC;
+
+	do {
+		seq = tsc_ref->tsc_sequence;
+		if (seq)
+			break;
+	} while (time_msr_read() < end);
+
+	report("TSC reference page being updated", seq);
+}
+
+static void warp_test(int ncpus)
+{
+	int i;
+	unsigned long long warps = 0, stalls = 0;
+	long long worst = 0;
+
+	atomic_set(&cpus_left, ncpus);
+	for (i = ncpus - 1; i >= 0; i--)
+		on_cpu_async(i, warp_test_cpu, &wti[i]);
+	while (atomic_read(&cpus_left));
+
+	for (i = 0; i < ncpus; i++) {
+		warps += wti[i].warps;
+		stalls += wti[i].stalls;
+		if (wti[i].worst < worst)
+			worst = wti[i].worst;
+	}
+
+	report("warps: %llu (worst %lld), stalls: %llu",
+	       warps == 0, warps, worst, stalls);
+}
+
+static void perf_test(int ncpus)
+{
+	int i;
+	unsigned long long loops = 0, cycles = 0;
+
+	atomic_set(&cpus_left, ncpus);
+	for (i = ncpus - 1; i >= 0; i--)
+		on_cpu_async(i, perf_test_cpu, &pti[i]);
+	while (atomic_read(&cpus_left));
+
+	for (i = 0; i < ncpus; i++) {
+		loops += pti[i].loops;
+		cycles += pti[i].cycles;
+	}
+
+	cycles /= loops;
+	report("iterations/s/cpu: %llu, "
+	       "cycles/iteration: %llu (expected < %u)",
+	       cycles < CPI_MAX,
+	       loops / DURATION / ncpus, cycles, CPI_MAX);
+}
+
+int main(int ac, char **av)
+{
+	int ncpus;
+
+	setup_vm();
+	smp_init();
+
+	ncpus = cpu_count();
+	if (ncpus > MAX_CPU)
+		ncpus = MAX_CPU;
+
+	tsc_ref = alloc_page();
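+	/* bit 0 enables the TSC page; bits 63:12 hold its guest page frame */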
+	wrmsr(HV_X64_MSR_REFERENCE_TSC, (u64)(uintptr_t)tsc_ref | 1);
+	report("MSR value after enabling",
+	       rdmsr(HV_X64_MSR_REFERENCE_TSC) == ((u64)(uintptr_t)tsc_ref | 1));
+
+	presence_test();
+	warp_test(ncpus);
+	perf_test(ncpus);
+
+	wrmsr(HV_X64_MSR_REFERENCE_TSC, 0LL);
+	report("MSR value after disabling", rdmsr(HV_X64_MSR_REFERENCE_TSC) == 0);
+
+	free_page(tsc_ref);
+
+	return report_summary();
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 4a1f74e..a866613 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -200,3 +200,8 @@ extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
 file = hyperv_stimer.flat
 smp = 2
 extra_params = -cpu kvm64,hv_time,hv_synic,hv_stimer -device hyperv-testdev
+
+[hyperv_clock]
+file = hyperv_clock.flat
+smp = 2
+extra_params = -cpu kvm64,hv_time
-- 
2.7.4
