Hi Krish,

On 2/6/20 8:08 PM, Krish Sadhukhan wrote:
>
> On 2/6/20 9:39 AM, Wei Huang wrote:
>> On 02/06 11:47, Eric Auger wrote:
>>> L2 guest calls vmcall and L1 checks the exit status does
>>> correspond.
>>>
>>> Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
>>> Reviewed-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
>>> Reviewed-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
>> I verified this patch with my AMD box, both with nested=1 and nested=0. I
>> also intentionally changed the assertion of exit_code to a different
>> value (0x082) and the test complained about it. So the test is good.
>>
>> # selftests: kvm: svm_vmcall_test
>> # ==== Test Assertion Failure ====
>> #   x86_64/svm_vmcall_test.c:64: false
>> #   pid=2485656 tid=2485656 - Interrupted system call
>> #      1  0x0000000000401387: main at svm_vmcall_test.c:72
>> #      2  0x00007fd0978d71a2: ?? ??:0
>> #      3  0x00000000004013ed: _start at ??:?
>> #   Failed guest assert: vmcb->control.exit_code == SVM_EXIT_VMMCALL
>> # Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
>> # Guest physical address width detected: 48
>> not ok 15 selftests: kvm: svm_vmcall_test # exit=254
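
For reference, one plausible form of the sanity-check mutation Wei
describes is the one-liner below. His exact edit is not shown in the
thread, so this is an assumption; the assert in l1_guest_code() is the
natural candidate, and 0x082 is SVM_EXIT_VMLOAD, an exit code a plain
vmcall can never produce, so the assert is guaranteed to fire:

-	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_ASSERT(vmcb->control.exit_code == 0x082);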
>>
>>> ---
>>>
>>> v3 -> v4:
>>> - remove useless includes
>>> - collected Lin's R-b
>>>
>>> v2 -> v3:
>>> - remove useless comment and add Vitaly's R-b
>>> ---
>>>  tools/testing/selftests/kvm/Makefile          |  1 +
>>>  .../selftests/kvm/x86_64/svm_vmcall_test.c    | 79 +++++++++++++++++++
>>>  2 files changed, 80 insertions(+)
>>>  create mode 100644 tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
>>>
>>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>>> index 2e770f554cae..b529d3b42c02 100644
>>> --- a/tools/testing/selftests/kvm/Makefile
>>> +++ b/tools/testing/selftests/kvm/Makefile
>>> @@ -26,6 +26,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
>>>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
>>>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
>>>  TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
>>> +TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
>>>  TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
>>>  TEST_GEN_PROGS_x86_64 += dirty_log_test
>>>  TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
>>> diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
>>> new file mode 100644
>>> index 000000000000..6d3565aab94e
>>> --- /dev/null
>>> +++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
>> Probably rename the file to svm_nested_vmcall_test.c. This matches with
>> the naming convention of VMX's nested tests. Otherwise people might
>> not know it is a nested one.
>
> Is it better to give this file a generic name, say, nsvm_tests or
> something like that, and place all future nested SVM tests in it,
> rather than creating a separate file for each nested test?

We had this discussion earlier; see:

https://lkml.org/lkml/2020/1/21/429

In v1 I proposed a framework with sub-tests similar to kvm-unit-tests
(kut), but it looks like we do not target that kind of test in
kselftests. The vmcall test is just a first dummy test that paves the
way for more involved API tests.

Thanks

Eric

>>
>> Everything else looks good.
>>
>>> @@ -0,0 +1,79 @@
>>> +// SPDX-License-Identifier: GPL-2.0-only
>>> +/*
>>> + * svm_vmcall_test
>>> + *
>>> + * Copyright (C) 2020, Red Hat, Inc.
>>> + *
>>> + * Nested SVM testing: VMCALL
>>> + */
>>> +
>>> +#include "test_util.h"
>>> +#include "kvm_util.h"
>>> +#include "processor.h"
>>> +#include "svm_util.h"
>>> +
>>> +#define VCPU_ID 5
>>> +
>>> +static struct kvm_vm *vm;
>>> +
>>> +static inline void l2_vmcall(struct svm_test_data *svm)
>>> +{
>>> +	__asm__ __volatile__("vmcall");
>>> +}
>>> +
>>> +static void l1_guest_code(struct svm_test_data *svm)
>>> +{
>>> +	#define L2_GUEST_STACK_SIZE 64
>>> +	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
>>> +	struct vmcb *vmcb = svm->vmcb;
>>> +
>>> +	/* Prepare for L2 execution. */
>>> +	generic_svm_setup(svm, l2_vmcall,
>>> +			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
>>> +
>>> +	run_guest(vmcb, svm->vmcb_gpa);
>>> +
>>> +	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
>>> +	GUEST_DONE();
>>> +}
>>> +
>>> +int main(int argc, char *argv[])
>>> +{
>>> +	vm_vaddr_t svm_gva;
>>> +
>>> +	nested_svm_check_supported();
>>> +
>>> +	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
>>> +	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
>>> +
>>> +	vcpu_alloc_svm(vm, &svm_gva);
>>> +	vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
>>> +
>>> +	for (;;) {
>>> +		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
>>> +		struct ucall uc;
>>> +
>>> +		vcpu_run(vm, VCPU_ID);
>>> +		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
>>> +			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
>>> +			    run->exit_reason,
>>> +			    exit_reason_str(run->exit_reason));
>>> +
>>> +		switch (get_ucall(vm, VCPU_ID, &uc)) {
>>> +		case UCALL_ABORT:
>>> +			TEST_ASSERT(false, "%s",
>>> +				    (const char *)uc.args[0]);
>>> +			/* NOT REACHED */
>>> +		case UCALL_SYNC:
>>> +			break;
>>> +		case UCALL_DONE:
>>> +			goto done;
>>> +		default:
>>> +			TEST_ASSERT(false,
>>> +				    "Unknown ucall 0x%x.", uc.cmd);
>>> +		}
>>> +	}
>>> +done:
>>> +	kvm_vm_free(vm);
>>> +	return 0;
>>> +}
>>
>
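
As an aside, the reason the UCALL_ABORT arm above can print uc.args[0]
as a plain string is that GUEST_ASSERT in these selftests is built on
ucall(). A rough sketch of the mechanism, approximating the definition
in the selftests headers of that era (treat this as a sketch, not the
authoritative source):

/*
 * Approximation of GUEST_ASSERT: on failure the guest issues a ucall
 * carrying the stringified condition, which the host-side loop
 * receives as UCALL_ABORT and reports via uc.args[0].
 */
#define GUEST_ASSERT(cond)						\
do {									\
	if (!(cond))							\
		ucall(UCALL_ABORT, 2,					\
		      "Failed guest assert: " #cond, __LINE__);		\
} while (0)

This is exactly what produces the "Failed guest assert:
vmcb->control.exit_code == SVM_EXIT_VMMCALL" line visible in Wei's log
above.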