Gracefully handle and return faults on VMXON instead of letting VMXON explode. The primary motivation is to be able to reuse the helper in tests that verify VMXON faults when it's supposed to, but printing a nice error message on fault is also nice. Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx> --- x86/vmx.c | 20 ++++++++++---------- x86/vmx.h | 29 +++++++++++++++++++++++------ x86/vmx_tests.c | 6 +++--- 3 files changed, 36 insertions(+), 19 deletions(-) diff --git a/x86/vmx.c b/x86/vmx.c index 8475bf3b..09e54332 100644 --- a/x86/vmx.c +++ b/x86/vmx.c @@ -1461,34 +1461,34 @@ static int test_vmxon(void) /* Unaligned page access */ vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region + 1); - ret1 = _vmx_on(vmxon_region); - report(ret1, "test vmxon with unaligned vmxon region"); - if (!ret1) { + ret1 = __vmxon_safe(vmxon_region); + report(ret1 < 0, "test vmxon with unaligned vmxon region"); + if (ret1 >= 0) { ret = 1; goto out; } /* gpa bits beyond physical address width are set*/ vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region | ((u64)1 << (width+1))); - ret1 = _vmx_on(vmxon_region); - report(ret1, "test vmxon with bits set beyond physical address width"); - if (!ret1) { + ret1 = __vmxon_safe(vmxon_region); + report(ret1 < 0, "test vmxon with bits set beyond physical address width"); + if (ret1 >= 0) { ret = 1; goto out; } /* invalid revision identifier */ *bsp_vmxon_region = 0xba9da9; - ret1 = vmx_on(); - report(ret1, "test vmxon with invalid revision identifier"); - if (!ret1) { + ret1 = vmxon_safe(); + report(ret1 < 0, "test vmxon with invalid revision identifier"); + if (ret1 >= 0) { ret = 1; goto out; } /* and finally a valid region */ *bsp_vmxon_region = basic.revision; - ret = vmx_on(); + ret = vmxon_safe(); report(!ret, "test vmxon with valid vmxon region"); out: diff --git a/x86/vmx.h b/x86/vmx.h index 7cd02410..604c78f6 100644 --- a/x86/vmx.h +++ b/x86/vmx.h @@ -870,18 +870,35 @@ void vmx_set_test_stage(u32 s); u32 vmx_get_test_stage(void); void 
vmx_inc_test_stage(void); -static int _vmx_on(u64 *vmxon_region) +/* -1 on VM-Fail, 0 on success, >0 on fault (returns the exception vector) */ +static int __vmxon_safe(u64 *vmxon_region) { - bool ret; + bool vmfail; u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF; - asm volatile ("push %1; popf; vmxon %2; setbe %0\n\t" - : "=q" (ret) : "q" (rflags), "m" (vmxon_region) : "cc"); - return ret; + + asm volatile ("push %1\n\t" + "popf\n\t" + ASM_TRY("1f") "vmxon %2\n\t" + "setbe %0\n\t" + "jmp 2f\n\t" + "1: movb $0, %0\n\t" + "2:\n\t" + : "=q" (vmfail) : "q" (rflags), "m" (vmxon_region) : "cc"); + + if (vmfail) + return -1; + + return exception_vector(); +} + +static int vmxon_safe(void) +{ + return __vmxon_safe(bsp_vmxon_region); } static int vmx_on(void) { - return _vmx_on(bsp_vmxon_region); + return vmxon_safe(); } static int vmx_off(void) diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c index 4c963b96..60762285 100644 --- a/x86/vmx_tests.c +++ b/x86/vmx_tests.c @@ -9695,7 +9695,7 @@ static void init_signal_test_thread(void *data) u64 *ap_vmxon_region = alloc_page(); enable_vmx(); init_vmx(ap_vmxon_region); - TEST_ASSERT(!_vmx_on(ap_vmxon_region)); + TEST_ASSERT(!__vmxon_safe(ap_vmxon_region)); /* Signal CPU have entered VMX operation */ vmx_set_test_stage(1); @@ -9743,7 +9743,7 @@ static void init_signal_test_thread(void *data) while (vmx_get_test_stage() != 8) ; /* Enter VMX operation (i.e. exec VMXON) */ - TEST_ASSERT(!_vmx_on(ap_vmxon_region)); + TEST_ASSERT(!__vmxon_safe(ap_vmxon_region)); /* Signal to BSP we are in VMX operation */ vmx_set_test_stage(9); @@ -9920,7 +9920,7 @@ static void sipi_test_ap_thread(void *data) ap_vmxon_region = alloc_page(); enable_vmx(); init_vmx(ap_vmxon_region); - TEST_ASSERT(!_vmx_on(ap_vmxon_region)); + TEST_ASSERT(!__vmxon_safe(ap_vmxon_region)); init_vmcs(&ap_vmcs); make_vmcs_current(ap_vmcs); -- 2.36.1.255.ge46751e96f-goog