Explicitly verify that KVM doesn't patch in the native hypercall if the
FIX_HYPERCALL_INSN quirk is disabled.  The test currently verifies that
a #UD occurred, but doesn't actually verify that no patching occurred.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 .../selftests/kvm/x86_64/fix_hypercall_test.c | 35 ++++++++++++++-----
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
index dde97be3e719..5925da3b3648 100644
--- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
@@ -21,8 +21,8 @@ static bool ud_expected;
 
 static void guest_ud_handler(struct ex_regs *regs)
 {
-	GUEST_ASSERT(ud_expected);
-	GUEST_DONE();
+	regs->rax = -EFAULT;
+	regs->rip += HYPERCALL_INSN_SIZE;
 }
 
 extern unsigned char svm_hypercall_insn[HYPERCALL_INSN_SIZE];
@@ -57,17 +57,18 @@ static void guest_main(void)
 {
 	unsigned char *native_hypercall_insn, *hypercall_insn;
 	uint8_t apic_id;
+	uint64_t ret;
 
 	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
 
 	if (is_intel_cpu()) {
 		native_hypercall_insn = vmx_hypercall_insn;
 		hypercall_insn = svm_hypercall_insn;
-		svm_do_sched_yield(apic_id);
+		ret = svm_do_sched_yield(apic_id);
 	} else if (is_amd_cpu()) {
 		native_hypercall_insn = svm_hypercall_insn;
 		hypercall_insn = vmx_hypercall_insn;
-		vmx_do_sched_yield(apic_id);
+		ret = vmx_do_sched_yield(apic_id);
 	} else {
 		GUEST_ASSERT(0);
 		/* unreachable */
@@ -75,12 +76,28 @@ static void guest_main(void)
 	}
 
 	/*
-	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
-	 * occurs).  Verify that a #UD is NOT expected and that KVM patched in
-	 * the native hypercall.
+	 * If the quirk is disabled, verify that guest_ud_handler() "returned"
+	 * -EFAULT and that KVM did NOT patch the hypercall.  If the quirk is
+	 * enabled, verify that the hypercall succeeded and that KVM patched in
+	 * the "right" hypercall.
 	 */
-	GUEST_ASSERT(!ud_expected);
-	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
+	if (ud_expected) {
+		GUEST_ASSERT(ret == (uint64_t)-EFAULT);
+
+		/*
+		 * Divergence should occur only on the last byte, as the VMCALL
+		 * (0F 01 C1) and VMMCALL (0F 01 D9) share the first two bytes.
+		 */
+		GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
+				     HYPERCALL_INSN_SIZE - 1));
+		GUEST_ASSERT(memcmp(native_hypercall_insn, hypercall_insn,
+				    HYPERCALL_INSN_SIZE));
+	} else {
+		GUEST_ASSERT(!ret);
+		GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
+				     HYPERCALL_INSN_SIZE));
+	}
+
 	GUEST_DONE();
 }
 
-- 
2.37.2.789.g6183377224-goog
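
For context, the host side arms the "quirk disabled" case via KVM_ENABLE_CAP
on the VM.  Below is a minimal sketch of how a userspace harness might do
that with raw ioctls; it is illustrative only and not taken from the patch
(the selftest itself goes through the selftests framework helpers, and the
disable_fix_hypercall_quirk() name here is hypothetical):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative sketch: disable the FIX_HYPERCALL_INSN quirk on @vm_fd so
 * that KVM injects a #UD for the "wrong" hypercall instruction instead of
 * rewriting it to the native one.  Returns 0 on success, -1 (with errno
 * set) if the kernel can't disable this quirk.  @vm_fd is assumed to be
 * an open KVM VM file descriptor.
 */
static int disable_fix_hypercall_quirk(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_DISABLE_QUIRKS2;
	cap.args[0] = KVM_X86_QUIRK_FIX_HYPERCALL_INSN;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

A harness would call this before running the vCPU and then expect the guest
flow above to take the ud_expected branch.  If memory serves,
KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2) reports the bitmask of quirks
the kernel allows disabling, so the negative test can be skipped on kernels
that don't support this quirk.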