Add three variants of the #PF interception access test to handle TLB
invalidations by relying on VPID rules.  Intercept the access test's
INVLPG and perform invalidation by:

  1. Implicitly flush on VM-Enter by disabling VPID
  2. Explicitly perform INVVPID on the target address
  3. Implicitly "flush" by moving to a new VPID

Case #3 exposes a bug where KVM fails to update unsync SPTEs when using
shadow paging and L1 changes the VPID it uses for L2, i.e. vmcs12->vpid.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 x86/unittests.cfg |  6 ++--
 x86/vmx_tests.c   | 89 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 91 insertions(+), 4 deletions(-)

diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index f3f9f17..80875d2 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -284,7 +284,7 @@ arch = i386
 
 [vmx]
 file = vmx.flat
-extra_params = -cpu max,+vmx -append "-exit_monitor_from_l2_test -ept_access* -vmx_smp* -vmx_vmcs_shadow_test -atomic_switch_overflow_msrs_test -vmx_init_signal_test -vmx_apic_passthrough_tpr_threshold_test -apic_reg_virt_test -virt_x2apic_mode_test -vmx_pf_exception_test"
+extra_params = -cpu max,+vmx -append "-exit_monitor_from_l2_test -ept_access* -vmx_smp* -vmx_vmcs_shadow_test -atomic_switch_overflow_msrs_test -vmx_init_signal_test -vmx_apic_passthrough_tpr_threshold_test -apic_reg_virt_test -virt_x2apic_mode_test -vmx_pf_exception_test -vmx_pf_no_vpid_test -vmx_pf_vpid_test"
 arch = x86_64
 groups = vmx
 
@@ -353,13 +353,13 @@ groups = vmx
 
 [vmx_pf_exception_test]
 file = vmx.flat
-extra_params = -cpu max,+vmx -append vmx_pf_exception_test
+extra_params = -cpu max,+vmx -append "vmx_pf_exception_test vmx_pf_no_vpid_test vmx_pf_vpid_test vmx_pf_invvpid_test"
 arch = x86_64
 groups = vmx nested_exception
 
 [vmx_pf_exception_test_reduced_maxphyaddr]
 file = vmx.flat
-extra_params = -cpu IvyBridge,phys-bits=36,host-phys-bits=off,+vmx -append vmx_pf_exception_test
+extra_params = -cpu IvyBridge,phys-bits=36,host-phys-bits=off,+vmx -append "vmx_pf_exception_test vmx_pf_no_vpid_test vmx_pf_vpid_test vmx_pf_invvpid_test"
 arch = x86_64
 groups = vmx nested_exception
 check = /sys/module/kvm_intel/parameters/allow_smaller_maxphyaddr=Y
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 172d385..3d57ed6 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -10575,13 +10575,21 @@ static void vmx_pf_exception_test_guest(void)
 	ac_test_run(PT_LEVEL_PML4);
 }
 
-static void vmx_pf_exception_test(void)
+typedef void (*invalidate_tlb_t)(void *data);
+
+static void __vmx_pf_exception_test(invalidate_tlb_t inv_fn, void *data)
 {
 	u64 efer;
 	struct cpuid cpuid;
 
 	test_set_guest(vmx_pf_exception_test_guest);
 
+	/* Intercept INVLPG when TLB invalidation is to be done from L1 (this). */
+	if (inv_fn)
+		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INVLPG);
+	else
+		vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INVLPG);
+
 	enter_guest();
 
 	while (vmcs_read(EXI_REASON) != VMX_VMCALL) {
@@ -10605,6 +10613,9 @@ static void vmx_pf_exception_test(void)
 			regs.rcx = cpuid.c;
 			regs.rdx = cpuid.d;
 			break;
+		case VMX_INVLPG:
+			inv_fn(data);
+			break;
 		default:
 			assert_msg(false,
 				   "Unexpected exit to L1, exit_reason: %s (0x%lx)",
@@ -10617,6 +10628,79 @@ static void vmx_pf_exception_test(void)
 	assert_exit_reason(VMX_VMCALL);
 }
 
+
+static void vmx_pf_exception_test(void)
+{
+	__vmx_pf_exception_test(NULL, NULL);
+}
+
+static void invalidate_tlb_no_vpid(void *data)
+{
+	/* If VPID is disabled, the TLB is flushed on VM-Enter and VM-Exit. */
+}
+
+static void vmx_pf_no_vpid_test(void)
+{
+	if (is_vpid_supported())
+		vmcs_clear_bits(CPU_EXEC_CTRL1, CPU_VPID);
+
+	__vmx_pf_exception_test(invalidate_tlb_no_vpid, NULL);
+}
+
+static void invalidate_tlb_invvpid_addr(void *data)
+{
+	invvpid(INVVPID_ALL, *(u16 *)data, vmcs_read(EXI_QUALIFICATION));
+}
+
+static void invalidate_tlb_new_vpid(void *data)
+{
+	u16 *vpid = data;
+
+	/*
+	 * Bump VPID to effectively flush L2's TLB from L0's perspective.
+	 * Invalidate all VPIDs when the VPID wraps to zero as hardware/KVM is
+	 * architecturally allowed to keep TLB entries indefinitely.
+	 */
+	++(*vpid);
+	if (*vpid == 0) {
+		++(*vpid);
+		invvpid(INVVPID_ALL, 0, 0);
+	}
+	vmcs_write(VPID, *vpid);
+}
+
+static void __vmx_pf_vpid_test(invalidate_tlb_t inv_fn, u16 vpid)
+{
+	if (!is_vpid_supported())
+		test_skip("VPID unsupported");
+
+	if (!is_invvpid_supported())
+		test_skip("INVVPID unsupported");
+
+	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY);
+	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_VPID);
+	vmcs_write(VPID, vpid);
+
+	__vmx_pf_exception_test(inv_fn, &vpid);
+}
+
+static void vmx_pf_invvpid_test(void)
+{
+	if (!is_invvpid_type_supported(INVVPID_ADDR))
+		test_skip("INVVPID ADDR unsupported");
+
+	__vmx_pf_vpid_test(invalidate_tlb_invvpid_addr, 0xaaaa);
+}
+
+static void vmx_pf_vpid_test(void)
+{
+	/* Need INVVPID(ALL) to flush VPIDs upon wrap/reuse. */
+	if (!is_invvpid_type_supported(INVVPID_ALL))
+		test_skip("INVVPID ALL unsupported");
+
+	__vmx_pf_vpid_test(invalidate_tlb_new_vpid, 1);
+}
+
 #define TEST(name) { #name, .v2 = name }
 
 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
@@ -10723,5 +10807,8 @@ struct vmx_test vmx_tests[] = {
 	TEST(vmx_mtf_test),
 	TEST(vmx_mtf_pdpte_test),
 	TEST(vmx_pf_exception_test),
+	TEST(vmx_pf_no_vpid_test),
+	TEST(vmx_pf_invvpid_test),
+	TEST(vmx_pf_vpid_test),
 	{ NULL, NULL, NULL, NULL, NULL, {0} },
 };
-- 
2.34.0.rc2.393.gf8c9666880-goog
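
Note for readers without the kvm-unit-tests sources at hand: the new tests
lean on the framework's invvpid() helper from the VMX test headers. Below is
a minimal sketch of what such a wrapper is assumed to look like; the names
used here (struct invvpid_desc, invvpid_sketch) are illustrative only and are
not part of this patch or of the kvm-unit-tests API.

  #include <stdint.h>

  /*
   * INVVPID operates on a 128-bit in-memory descriptor: bits 15:0 hold the
   * VPID, bits 63:16 are reserved (must be zero), and bits 127:64 hold the
   * linear address, which is consumed only by the individual-address type.
   */
  struct invvpid_desc {
          uint64_t vpid;
          uint64_t gla;
  };

  static inline void invvpid_sketch(unsigned long type, uint16_t vpid,
                                    uint64_t gla)
  {
          struct invvpid_desc desc = { .vpid = vpid, .gla = gla };

          /* AT&T operand order: m128 descriptor first, type register second. */
          asm volatile("invvpid %0, %1"
                       : : "m" (desc), "r" (type) : "cc", "memory");
  }

The three invalidation variants above differ only in how the stale translation
is dropped: by disabling VPID entirely so VM-Enter/VM-Exit flush the TLB, by
issuing INVVPID with the address reported in the exit qualification, or by
switching L2 to a fresh VPID that has never been used for a translation.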