[kvm-unit-tests PATCH v4 2/8] x86: nSVM: Move all nNPT test cases from svm_tests.c to a separate file.

The nNPT test cases are moved to a separate file, svm_npt.c, so that
they can be run independently with PTE_USER_MASK disabled.

The rest of the test cases can be run with PTE_USER_MASK enabled.

No functional change intended.
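
For reference, a minimal sketch of the entry point this patch gives
svm_npt.c (it mirrors the hunk below): opt_mask is left at 0, i.e. the
test's page tables are set up with PTE_USER_MASK disabled, while the
remaining tests in svm.flat are expected to get the user mask enabled by
a follow-up patch (presumably by passing that mask to __setup_vm(); the
exact mechanism there is an assumption, not part of this patch):

  #include "svm.h"
  #include "vm.h"

  int main(int ac, char **av)
  {
  	/* nNPT tests: keep PTE_USER_MASK disabled in the page tables. */
  	pteval_t opt_mask = 0;

  	__setup_vm(&opt_mask);
  	return run_svm_tests(ac, av);
  }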

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Manali Shukla <manali.shukla@xxxxxxx>
---
 x86/Makefile.common |   2 +
 x86/Makefile.x86_64 |   2 +
 x86/svm.c           |   8 -
 x86/svm_npt.c       | 390 ++++++++++++++++++++++++++++++++++++++++++++
 x86/svm_tests.c     | 371 +----------------------------------------
 x86/unittests.cfg   |   6 +
 6 files changed, 409 insertions(+), 370 deletions(-)
 create mode 100644 x86/svm_npt.c

diff --git a/x86/Makefile.common b/x86/Makefile.common
index b903988..5590afe 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -107,6 +107,8 @@ $(TEST_DIR)/access_test.$(bin): $(TEST_DIR)/access.o
 
 $(TEST_DIR)/vmx.$(bin): $(TEST_DIR)/access.o
 
+$(TEST_DIR)/svm_npt.$(bin): $(TEST_DIR)/svm.o
+
 $(TEST_DIR)/kvmclock_test.$(bin): $(TEST_DIR)/kvmclock.o
 
 $(TEST_DIR)/hyperv_synic.$(bin): $(TEST_DIR)/hyperv.o
diff --git a/x86/Makefile.x86_64 b/x86/Makefile.x86_64
index f18c1e2..dbe5967 100644
--- a/x86/Makefile.x86_64
+++ b/x86/Makefile.x86_64
@@ -42,6 +42,7 @@ endif
 ifneq ($(CONFIG_EFI),y)
 tests += $(TEST_DIR)/access_test.$(exe)
 tests += $(TEST_DIR)/svm.$(exe)
+tests += $(TEST_DIR)/svm_npt.$(exe)
 tests += $(TEST_DIR)/vmx.$(exe)
 endif
 
@@ -55,3 +56,4 @@ $(TEST_DIR)/hyperv_clock.$(bin): $(TEST_DIR)/hyperv_clock.o
 
 $(TEST_DIR)/vmx.$(bin): $(TEST_DIR)/vmx_tests.o
 $(TEST_DIR)/svm.$(bin): $(TEST_DIR)/svm_tests.o
+$(TEST_DIR)/svm_npt.$(bin): $(TEST_DIR)/svm_npt.o
diff --git a/x86/svm.c b/x86/svm.c
index 299383c..ec825c7 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -440,11 +440,3 @@ int run_svm_tests(int ac, char **av)
 
 	return report_summary();
 }
-
-int main(int ac, char **av)
-{
-    pteval_t opt_mask = 0;
-
-    __setup_vm(&opt_mask);
-    return run_svm_tests(ac, av);
-}
diff --git a/x86/svm_npt.c b/x86/svm_npt.c
new file mode 100644
index 0000000..53e8a90
--- /dev/null
+++ b/x86/svm_npt.c
@@ -0,0 +1,390 @@
+#include "svm.h"
+#include "vm.h"
+#include "alloc_page.h"
+#include "vmalloc.h"
+
+static void *scratch_page;
+
+static void null_test(struct svm_test *test)
+{
+}
+
+static void npt_np_prepare(struct svm_test *test)
+{
+	u64 *pte;
+
+	scratch_page = alloc_page();
+	pte = npt_get_pte((u64) scratch_page);
+
+	*pte &= ~1ULL;
+}
+
+static void npt_np_test(struct svm_test *test)
+{
+	(void)*(volatile u64 *)scratch_page;
+}
+
+static bool npt_np_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte((u64) scratch_page);
+
+	*pte |= 1ULL;
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x100000004ULL);
+}
+
+static void npt_nx_prepare(struct svm_test *test)
+{
+	u64 *pte;
+
+	test->scratch = rdmsr(MSR_EFER);
+	wrmsr(MSR_EFER, test->scratch | EFER_NX);
+
+	/* Clear the guest's EFER.NX, it should not affect NPT behavior. */
+	vmcb->save.efer &= ~EFER_NX;
+
+	pte = npt_get_pte((u64) null_test);
+
+	*pte |= PT64_NX_MASK;
+}
+
+static bool npt_nx_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte((u64) null_test);
+
+	wrmsr(MSR_EFER, test->scratch);
+
+	*pte &= ~PT64_NX_MASK;
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x100000015ULL);
+}
+
+static void npt_us_prepare(struct svm_test *test)
+{
+	u64 *pte;
+
+	scratch_page = alloc_page();
+	pte = npt_get_pte((u64) scratch_page);
+
+	*pte &= ~(1ULL << 2);
+}
+
+static void npt_us_test(struct svm_test *test)
+{
+	(void)*(volatile u64 *)scratch_page;
+}
+
+static bool npt_us_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte((u64) scratch_page);
+
+	*pte |= (1ULL << 2);
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x100000005ULL);
+}
+
+static void npt_rw_prepare(struct svm_test *test)
+{
+
+	u64 *pte;
+
+	pte = npt_get_pte(0x80000);
+
+	*pte &= ~(1ULL << 1);
+}
+
+static void npt_rw_test(struct svm_test *test)
+{
+	u64 *data = (void *)(0x80000);
+
+	*data = 0;
+}
+
+static bool npt_rw_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte(0x80000);
+
+	*pte |= (1ULL << 1);
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x100000007ULL);
+}
+
+static void npt_rw_pfwalk_prepare(struct svm_test *test)
+{
+
+	u64 *pte;
+
+	pte = npt_get_pte(read_cr3());
+
+	*pte &= ~(1ULL << 1);
+}
+
+static bool npt_rw_pfwalk_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte(read_cr3());
+
+	*pte |= (1ULL << 1);
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x200000007ULL)
+	    && (vmcb->control.exit_info_2 == read_cr3());
+}
+
+static void npt_l1mmio_prepare(struct svm_test *test)
+{
+}
+
+u32 nested_apic_version1;
+u32 nested_apic_version2;
+
+static void npt_l1mmio_test(struct svm_test *test)
+{
+	volatile u32 *data = (volatile void *)(0xfee00030UL);
+
+	nested_apic_version1 = *data;
+	nested_apic_version2 = *data;
+}
+
+static bool npt_l1mmio_check(struct svm_test *test)
+{
+	volatile u32 *data = (volatile void *)(0xfee00030);
+	u32 lvr = *data;
+
+	return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
+}
+
+static void npt_rw_l1mmio_prepare(struct svm_test *test)
+{
+
+	u64 *pte;
+
+	pte = npt_get_pte(0xfee00080);
+
+	*pte &= ~(1ULL << 1);
+}
+
+static void npt_rw_l1mmio_test(struct svm_test *test)
+{
+	volatile u32 *data = (volatile void *)(0xfee00080);
+
+	*data = *data;
+}
+
+static bool npt_rw_l1mmio_check(struct svm_test *test)
+{
+	u64 *pte = npt_get_pte(0xfee00080);
+
+	*pte |= (1ULL << 1);
+
+	return (vmcb->control.exit_code == SVM_EXIT_NPF)
+	    && (vmcb->control.exit_info_1 == 0x100000007ULL);
+}
+
+static void basic_guest_main(struct svm_test *test)
+{
+}
+
+static void __svm_npt_rsvd_bits_test(u64 * pxe, u64 rsvd_bits, u64 efer,
+				     ulong cr4, u64 guest_efer, ulong guest_cr4)
+{
+	u64 pxe_orig = *pxe;
+	int exit_reason;
+	u64 pfec;
+
+	wrmsr(MSR_EFER, efer);
+	write_cr4(cr4);
+
+	vmcb->save.efer = guest_efer;
+	vmcb->save.cr4 = guest_cr4;
+
+	*pxe |= rsvd_bits;
+
+	exit_reason = svm_vmrun();
+
+	report(exit_reason == SVM_EXIT_NPF,
+	       "Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits,
+	       exit_reason);
+
+	if (pxe == npt_get_pdpe() || pxe == npt_get_pml4e()) {
+		/*
+		 * The guest's page tables will blow up on a bad PDPE/PML4E,
+		 * before starting the final walk of the guest page.
+		 */
+		pfec = 0x20000000full;
+	} else {
+		/* RSVD #NPF on final walk of guest page. */
+		pfec = 0x10000000dULL;
+
+		/* PFEC.FETCH=1 if NX=1 *or* SMEP=1. */
+		if ((cr4 & X86_CR4_SMEP) || (efer & EFER_NX))
+			pfec |= 0x10;
+
+	}
+
+	report(vmcb->control.exit_info_1 == pfec,
+	       "Wanted PFEC = 0x%lx, got PFEC = %lx, PxE = 0x%lx.  "
+	       "host.NX = %u, host.SMEP = %u, guest.NX = %u, guest.SMEP = %u",
+	       pfec, vmcb->control.exit_info_1, *pxe,
+	       !!(efer & EFER_NX), !!(cr4 & X86_CR4_SMEP),
+	       !!(guest_efer & EFER_NX), !!(guest_cr4 & X86_CR4_SMEP));
+
+	*pxe = pxe_orig;
+}
+
+static void _svm_npt_rsvd_bits_test(u64 * pxe, u64 pxe_rsvd_bits, u64 efer,
+				    ulong cr4, u64 guest_efer, ulong guest_cr4)
+{
+	u64 rsvd_bits;
+	int i;
+
+	/*
+	 * RDTSC or RDRAND can sometimes fail to generate valid reserved bits.
+	 */
+	if (!pxe_rsvd_bits) {
+		report_skip
+		    ("svm_npt_rsvd_bits_test: Reserved bits are not valid");
+		return;
+	}
+
+	/*
+	 * Test all combinations of guest/host EFER.NX and CR4.SMEP.  If host
+	 * EFER.NX=0, use NX as the reserved bit, otherwise use the passed in
+	 * @pxe_rsvd_bits.
+	 */
+	for (i = 0; i < 16; i++) {
+		if (i & 1) {
+			rsvd_bits = pxe_rsvd_bits;
+			efer |= EFER_NX;
+		} else {
+			rsvd_bits = PT64_NX_MASK;
+			efer &= ~EFER_NX;
+		}
+		if (i & 2)
+			cr4 |= X86_CR4_SMEP;
+		else
+			cr4 &= ~X86_CR4_SMEP;
+		if (i & 4)
+			guest_efer |= EFER_NX;
+		else
+			guest_efer &= ~EFER_NX;
+		if (i & 8)
+			guest_cr4 |= X86_CR4_SMEP;
+		else
+			guest_cr4 &= ~X86_CR4_SMEP;
+
+		__svm_npt_rsvd_bits_test(pxe, rsvd_bits, efer, cr4,
+					 guest_efer, guest_cr4);
+	}
+}
+
+static u64 get_random_bits(u64 hi, u64 low)
+{
+	unsigned retry = 5;
+	u64 rsvd_bits = 0;
+
+	if (this_cpu_has(X86_FEATURE_RDRAND)) {
+		do {
+			rsvd_bits = (rdrand() << low) & GENMASK_ULL(hi, low);
+			retry--;
+		} while (!rsvd_bits && retry);
+	}
+
+	if (!rsvd_bits) {
+		retry = 5;
+		do {
+			rsvd_bits = (rdtsc() << low) & GENMASK_ULL(hi, low);
+			retry--;
+		} while (!rsvd_bits && retry);
+	}
+
+	return rsvd_bits;
+}
+
+static void svm_npt_rsvd_bits_test(void)
+{
+	u64 saved_efer, host_efer, sg_efer, guest_efer;
+	ulong saved_cr4, host_cr4, sg_cr4, guest_cr4;
+
+	if (!npt_supported()) {
+		report_skip("NPT not supported");
+		return;
+	}
+
+	saved_efer = host_efer = rdmsr(MSR_EFER);
+	saved_cr4 = host_cr4 = read_cr4();
+	sg_efer = guest_efer = vmcb->save.efer;
+	sg_cr4 = guest_cr4 = vmcb->save.cr4;
+
+	test_set_guest(basic_guest_main);
+
+	/*
+	 * 4k PTEs don't have reserved bits if MAXPHYADDR >= 52, just skip the
+	 * sub-test.  The NX test is still valid, but the extra bit of coverage
+	 * isn't worth the extra complexity.
+	 */
+	if (cpuid_maxphyaddr() >= 52)
+		goto skip_pte_test;
+
+	_svm_npt_rsvd_bits_test(npt_get_pte((u64) basic_guest_main),
+				get_random_bits(51, cpuid_maxphyaddr()),
+				host_efer, host_cr4, guest_efer, guest_cr4);
+
+skip_pte_test:
+	_svm_npt_rsvd_bits_test(npt_get_pde((u64) basic_guest_main),
+				get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
+				host_efer, host_cr4, guest_efer, guest_cr4);
+
+	_svm_npt_rsvd_bits_test(npt_get_pdpe(),
+				PT_PAGE_SIZE_MASK |
+				(this_cpu_has(X86_FEATURE_GBPAGES) ?
+				 get_random_bits(29, 13) : 0), host_efer,
+				host_cr4, guest_efer, guest_cr4);
+
+	_svm_npt_rsvd_bits_test(npt_get_pml4e(), BIT_ULL(8),
+				host_efer, host_cr4, guest_efer, guest_cr4);
+
+	wrmsr(MSR_EFER, saved_efer);
+	write_cr4(saved_cr4);
+	vmcb->save.efer = sg_efer;
+	vmcb->save.cr4 = sg_cr4;
+}
+
+int main(int ac, char **av)
+{
+	pteval_t opt_mask = 0;
+
+	__setup_vm(&opt_mask);
+	return run_svm_tests(ac, av);
+}
+
+#define TEST(name) { #name, .v2 = name }
+
+struct svm_test svm_tests[] = {
+	{ "npt_nx", npt_supported, npt_nx_prepare,
+	 default_prepare_gif_clear, null_test,
+	 default_finished, npt_nx_check },
+	{ "npt_np", npt_supported, npt_np_prepare,
+	 default_prepare_gif_clear, npt_np_test,
+	 default_finished, npt_np_check },
+	{ "npt_us", npt_supported, npt_us_prepare,
+	 default_prepare_gif_clear, npt_us_test,
+	 default_finished, npt_us_check },
+	{ "npt_rw", npt_supported, npt_rw_prepare,
+	 default_prepare_gif_clear, npt_rw_test,
+	 default_finished, npt_rw_check },
+	{ "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
+	 default_prepare_gif_clear, null_test,
+	 default_finished, npt_rw_pfwalk_check },
+	{ "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
+	 default_prepare_gif_clear, npt_l1mmio_test,
+	 default_finished, npt_l1mmio_check },
+	{ "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
+	 default_prepare_gif_clear, npt_rw_l1mmio_test,
+	 default_finished, npt_rw_l1mmio_check },
+	TEST(svm_npt_rsvd_bits_test),
+	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
+};
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 6a9b03b..f0eeb1d 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -10,11 +10,10 @@
 #include "isr.h"
 #include "apic.h"
 #include "delay.h"
+#include "vmalloc.h"
 
 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
 
-static void *scratch_page;
-
 #define LATENCY_RUNS 1000000
 
 extern u16 cpu_online_count;
@@ -698,181 +697,6 @@ static bool sel_cr0_bug_check(struct svm_test *test)
     return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
 }
 
-static void npt_nx_prepare(struct svm_test *test)
-{
-    u64 *pte;
-
-    test->scratch = rdmsr(MSR_EFER);
-    wrmsr(MSR_EFER, test->scratch | EFER_NX);
-
-    /* Clear the guest's EFER.NX, it should not affect NPT behavior. */
-    vmcb->save.efer &= ~EFER_NX;
-
-    pte = npt_get_pte((u64)null_test);
-
-    *pte |= PT64_NX_MASK;
-}
-
-static bool npt_nx_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte((u64)null_test);
-
-    wrmsr(MSR_EFER, test->scratch);
-
-    *pte &= ~PT64_NX_MASK;
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x100000015ULL);
-}
-
-static void npt_np_prepare(struct svm_test *test)
-{
-    u64 *pte;
-
-    scratch_page = alloc_page();
-    pte = npt_get_pte((u64)scratch_page);
-
-    *pte &= ~1ULL;
-}
-
-static void npt_np_test(struct svm_test *test)
-{
-    (void) *(volatile u64 *)scratch_page;
-}
-
-static bool npt_np_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte((u64)scratch_page);
-
-    *pte |= 1ULL;
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x100000004ULL);
-}
-
-static void npt_us_prepare(struct svm_test *test)
-{
-    u64 *pte;
-
-    scratch_page = alloc_page();
-    pte = npt_get_pte((u64)scratch_page);
-
-    *pte &= ~(1ULL << 2);
-}
-
-static void npt_us_test(struct svm_test *test)
-{
-    (void) *(volatile u64 *)scratch_page;
-}
-
-static bool npt_us_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte((u64)scratch_page);
-
-    *pte |= (1ULL << 2);
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x100000005ULL);
-}
-
-static void npt_rw_prepare(struct svm_test *test)
-{
-
-    u64 *pte;
-
-    pte = npt_get_pte(0x80000);
-
-    *pte &= ~(1ULL << 1);
-}
-
-static void npt_rw_test(struct svm_test *test)
-{
-    u64 *data = (void*)(0x80000);
-
-    *data = 0;
-}
-
-static bool npt_rw_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte(0x80000);
-
-    *pte |= (1ULL << 1);
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x100000007ULL);
-}
-
-static void npt_rw_pfwalk_prepare(struct svm_test *test)
-{
-
-    u64 *pte;
-
-    pte = npt_get_pte(read_cr3());
-
-    *pte &= ~(1ULL << 1);
-}
-
-static bool npt_rw_pfwalk_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte(read_cr3());
-
-    *pte |= (1ULL << 1);
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x200000007ULL)
-	   && (vmcb->control.exit_info_2 == read_cr3());
-}
-
-static void npt_l1mmio_prepare(struct svm_test *test)
-{
-}
-
-u32 nested_apic_version1;
-u32 nested_apic_version2;
-
-static void npt_l1mmio_test(struct svm_test *test)
-{
-    volatile u32 *data = (volatile void*)(0xfee00030UL);
-
-    nested_apic_version1 = *data;
-    nested_apic_version2 = *data;
-}
-
-static bool npt_l1mmio_check(struct svm_test *test)
-{
-    volatile u32 *data = (volatile void*)(0xfee00030);
-    u32 lvr = *data;
-
-    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
-}
-
-static void npt_rw_l1mmio_prepare(struct svm_test *test)
-{
-
-    u64 *pte;
-
-    pte = npt_get_pte(0xfee00080);
-
-    *pte &= ~(1ULL << 1);
-}
-
-static void npt_rw_l1mmio_test(struct svm_test *test)
-{
-    volatile u32 *data = (volatile void*)(0xfee00080);
-
-    *data = *data;
-}
-
-static bool npt_rw_l1mmio_check(struct svm_test *test)
-{
-    u64 *pte = npt_get_pte(0xfee00080);
-
-    *pte |= (1ULL << 1);
-
-    return (vmcb->control.exit_code == SVM_EXIT_NPF)
-           && (vmcb->control.exit_info_1 == 0x100000007ULL);
-}
-
 #define TSC_ADJUST_VALUE    (1ll << 32)
 #define TSC_OFFSET_VALUE    (~0ull << 48)
 static bool ok;
@@ -2672,169 +2496,6 @@ static void svm_test_singlestep(void)
 		vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion");
 }
 
-static void __svm_npt_rsvd_bits_test(u64 *pxe, u64 rsvd_bits, u64 efer,
-				     ulong cr4, u64 guest_efer, ulong guest_cr4)
-{
-	u64 pxe_orig = *pxe;
-	int exit_reason;
-	u64 pfec;
-
-	wrmsr(MSR_EFER, efer);
-	write_cr4(cr4);
-
-	vmcb->save.efer = guest_efer;
-	vmcb->save.cr4  = guest_cr4;
-
-	*pxe |= rsvd_bits;
-
-	exit_reason = svm_vmrun();
-
-	report(exit_reason == SVM_EXIT_NPF,
-	       "Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits, exit_reason);
-
-	if (pxe == npt_get_pdpe() || pxe == npt_get_pml4e()) {
-		/*
-		 * The guest's page tables will blow up on a bad PDPE/PML4E,
-		 * before starting the final walk of the guest page.
-		 */
-		pfec = 0x20000000full;
-	} else {
-		/* RSVD #NPF on final walk of guest page. */
-		pfec = 0x10000000dULL;
-
-		/* PFEC.FETCH=1 if NX=1 *or* SMEP=1. */
-		if ((cr4 & X86_CR4_SMEP) || (efer & EFER_NX))
-			pfec |= 0x10;
-
-	}
-
-	report(vmcb->control.exit_info_1 == pfec,
-	       "Wanted PFEC = 0x%lx, got PFEC = %lx, PxE = 0x%lx.  "
-	       "host.NX = %u, host.SMEP = %u, guest.NX = %u, guest.SMEP = %u",
-	       pfec, vmcb->control.exit_info_1, *pxe,
-	       !!(efer & EFER_NX), !!(cr4 & X86_CR4_SMEP),
-	       !!(guest_efer & EFER_NX), !!(guest_cr4 & X86_CR4_SMEP));
-
-	*pxe = pxe_orig;
-}
-
-static void _svm_npt_rsvd_bits_test(u64 *pxe, u64 pxe_rsvd_bits,  u64 efer,
-				    ulong cr4, u64 guest_efer, ulong guest_cr4)
-{
-	u64 rsvd_bits;
-	int i;
-
-	/*
-	 * RDTSC or RDRAND can sometimes fail to generate a valid reserved bits
-	 */
-	if (!pxe_rsvd_bits) {
-		report_skip("svm_npt_rsvd_bits_test: Reserved bits are not valid");
-		return;
-	}
-
-	/*
-	 * Test all combinations of guest/host EFER.NX and CR4.SMEP.  If host
-	 * EFER.NX=0, use NX as the reserved bit, otherwise use the passed in
-	 * @pxe_rsvd_bits.
-	 */
-	for (i = 0; i < 16; i++) {
-		if (i & 1) {
-			rsvd_bits = pxe_rsvd_bits;
-			efer |= EFER_NX;
-		} else {
-			rsvd_bits = PT64_NX_MASK;
-			efer &= ~EFER_NX;
-		}
-		if (i & 2)
-			cr4 |= X86_CR4_SMEP;
-		else
-			cr4 &= ~X86_CR4_SMEP;
-		if (i & 4)
-			guest_efer |= EFER_NX;
-		else
-			guest_efer &= ~EFER_NX;
-		if (i & 8)
-			guest_cr4 |= X86_CR4_SMEP;
-		else
-			guest_cr4 &= ~X86_CR4_SMEP;
-
-		__svm_npt_rsvd_bits_test(pxe, rsvd_bits, efer, cr4,
-					 guest_efer, guest_cr4);
-	}
-}
-
-static u64 get_random_bits(u64 hi, u64 low)
-{
-	unsigned retry = 5;
-	u64 rsvd_bits = 0;
-
-	if (this_cpu_has(X86_FEATURE_RDRAND)) {
-		do {
-			rsvd_bits = (rdrand() << low) & GENMASK_ULL(hi, low);
-			retry--;
-		} while (!rsvd_bits && retry);
-	}
-
-	if (!rsvd_bits) {
-		retry = 5;
-		do {
-			rsvd_bits = (rdtsc() << low) & GENMASK_ULL(hi, low);
-			retry--;
-		} while (!rsvd_bits && retry);
-	}
-
-	return rsvd_bits;
-}
-
-
-static void svm_npt_rsvd_bits_test(void)
-{
-	u64   saved_efer, host_efer, sg_efer, guest_efer;
-	ulong saved_cr4,  host_cr4,  sg_cr4,  guest_cr4;
-
-	if (!npt_supported()) {
-		report_skip("NPT not supported");
-		return;
-	}
-
-	saved_efer = host_efer  = rdmsr(MSR_EFER);
-	saved_cr4  = host_cr4   = read_cr4();
-	sg_efer    = guest_efer = vmcb->save.efer;
-	sg_cr4     = guest_cr4  = vmcb->save.cr4;
-
-	test_set_guest(basic_guest_main);
-
-	/*
-	 * 4k PTEs don't have reserved bits if MAXPHYADDR >= 52, just skip the
-	 * sub-test.  The NX test is still valid, but the extra bit of coverage
-	 * isn't worth the extra complexity.
-	 */
-	if (cpuid_maxphyaddr() >= 52)
-		goto skip_pte_test;
-
-	_svm_npt_rsvd_bits_test(npt_get_pte((u64)basic_guest_main),
-				get_random_bits(51, cpuid_maxphyaddr()),
-				host_efer, host_cr4, guest_efer, guest_cr4);
-
-skip_pte_test:
-	_svm_npt_rsvd_bits_test(npt_get_pde((u64)basic_guest_main),
-				get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
-				host_efer, host_cr4, guest_efer, guest_cr4);
-
-	_svm_npt_rsvd_bits_test(npt_get_pdpe(),
-				PT_PAGE_SIZE_MASK |
-					(this_cpu_has(X86_FEATURE_GBPAGES) ? get_random_bits(29, 13) : 0),
-				host_efer, host_cr4, guest_efer, guest_cr4);
-
-	_svm_npt_rsvd_bits_test(npt_get_pml4e(), BIT_ULL(8),
-				host_efer, host_cr4, guest_efer, guest_cr4);
-
-	wrmsr(MSR_EFER, saved_efer);
-	write_cr4(saved_cr4);
-	vmcb->save.efer = sg_efer;
-	vmcb->save.cr4  = sg_cr4;
-}
-
 static bool volatile svm_errata_reproduced = false;
 static unsigned long volatile physical = 0;
 
@@ -3634,6 +3295,14 @@ static void svm_intr_intercept_mix_smi(void)
 	svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI);
 }
 
+int main(int ac, char **av)
+{
+    pteval_t opt_mask = 0;
+
+    __setup_vm(&opt_mask);
+    return run_svm_tests(ac, av);
+}
+
 struct svm_test svm_tests[] = {
     { "null", default_supported, default_prepare,
       default_prepare_gif_clear, null_test,
@@ -3677,27 +3346,6 @@ struct svm_test svm_tests[] = {
     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
       default_prepare_gif_clear, sel_cr0_bug_test,
        sel_cr0_bug_finished, sel_cr0_bug_check },
-    { "npt_nx", npt_supported, npt_nx_prepare,
-      default_prepare_gif_clear, null_test,
-      default_finished, npt_nx_check },
-    { "npt_np", npt_supported, npt_np_prepare,
-      default_prepare_gif_clear, npt_np_test,
-      default_finished, npt_np_check },
-    { "npt_us", npt_supported, npt_us_prepare,
-      default_prepare_gif_clear, npt_us_test,
-      default_finished, npt_us_check },
-    { "npt_rw", npt_supported, npt_rw_prepare,
-      default_prepare_gif_clear, npt_rw_test,
-      default_finished, npt_rw_check },
-    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
-      default_prepare_gif_clear, null_test,
-      default_finished, npt_rw_pfwalk_check },
-    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
-      default_prepare_gif_clear, npt_l1mmio_test,
-      default_finished, npt_l1mmio_check },
-    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
-      default_prepare_gif_clear, npt_rw_l1mmio_test,
-      default_finished, npt_rw_l1mmio_check },
     { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
       default_prepare_gif_clear, tsc_adjust_test,
       default_finished, tsc_adjust_check },
@@ -3749,7 +3397,6 @@ struct svm_test svm_tests[] = {
       vgif_check },
     TEST(svm_cr4_osxsave_test),
     TEST(svm_guest_state_test),
-    TEST(svm_npt_rsvd_bits_test),
     TEST(svm_vmrun_errata_test),
     TEST(svm_vmload_vmsave),
     TEST(svm_test_singlestep),
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 3701797..1828d2c 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -258,6 +258,12 @@ file = svm.flat
 extra_params = -cpu max,+svm -overcommit cpu-pm=on -m 4g -append pause_filter_test
 arch = x86_64
 
+[svm_npt]
+file = svm_npt.flat
+smp = 2
+extra_params = -cpu max,+svm -m 4g
+arch = x86_64
+
 [taskswitch]
 file = taskswitch.flat
 arch = i386
-- 
2.30.2



