Convert memslot_perf_test to use __vm_create_with_one_vcpu() and pass
around a 'struct kvm_vcpu' object instead of using a global VCPU_ID.
This is the first of many, many steps towards eliminating VCPU_ID from
all KVM selftests, and towards eventually purging the VM+vcpu_id mess.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 .../testing/selftests/kvm/memslot_perf_test.c | 28 ++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 1727f75e0c2c..009eb19b28af 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -25,8 +25,6 @@
 #include <kvm_util.h>
 #include <processor.h>
 
-#define VCPU_ID			0
-
 #define MEM_SIZE		((512U << 20) + 4096)
 #define MEM_SIZE_PAGES		(MEM_SIZE / 4096)
 #define MEM_GPA			0x10000000UL
@@ -90,6 +88,7 @@ static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
 
 struct vm_data {
 	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
 	pthread_t vcpu_thread;
 	uint32_t nslots;
 	uint64_t npages;
@@ -127,29 +126,29 @@ static bool verbose;
 			pr_info(__VA_ARGS__);	\
 	} while (0)
 
-static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
+static void check_mmio_access(struct vm_data *data, struct kvm_run *run)
 {
-	TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+	TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
 	TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
 	TEST_ASSERT(run->mmio.len == 8,
 		    "Unexpected exit mmio size = %u", run->mmio.len);
-	TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
-		    run->mmio.phys_addr <= vm->mmio_gpa_max,
+	TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
+		    run->mmio.phys_addr <= data->mmio_gpa_max,
 		    "Unexpected exit mmio address = 0x%llx",
 		    run->mmio.phys_addr);
 }
 
-static void *vcpu_worker(void *data)
+static void *vcpu_worker(void *__data)
 {
-	struct vm_data *vm = data;
-	struct kvm_run *run;
+	struct vm_data *data = __data;
+	struct kvm_vcpu *vcpu = data->vcpu;
+	struct kvm_run *run = vcpu->run;
 	struct ucall uc;
 
-	run = vcpu_state(vm->vm, VCPU_ID);
 	while (1) {
-		vcpu_run(vm->vm, VCPU_ID);
+		vcpu_run(data->vm, vcpu->id);
 
-		switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
+		switch (get_ucall(data->vm, vcpu->id, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(uc.args[1] == 0,
 				    "Unexpected sync ucall, got %lx",
@@ -158,7 +157,7 @@ static void *vcpu_worker(void *data)
 			continue;
 		case UCALL_NONE:
 			if (run->exit_reason == KVM_EXIT_MMIO)
-				check_mmio_access(vm, run);
+				check_mmio_access(data, run);
 			else
 				goto done;
 			break;
@@ -238,6 +237,7 @@ static struct vm_data *alloc_vm(void)
 	TEST_ASSERT(data, "malloc(vmdata) failed");
 
 	data->vm = NULL;
+	data->vcpu = NULL;
 	data->hva_slots = NULL;
 
 	return data;
@@ -278,7 +278,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
 	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
 	TEST_ASSERT(data->hva_slots, "malloc() fail");
 
-	data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
 	ucall_init(data->vm, NULL);
 
 	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
-- 
2.36.0.464.gb9c8b46e94-goog
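
For readers not yet familiar with the new helper, the conversion this patch
applies boils down to the rough before/after sketch below. It only uses the
helpers that appear in the diff above (vm_create_default(), vcpu_state(),
__vm_create_with_one_vcpu(), vcpu_run(), get_ucall()); the local variable
names are illustrative rather than lifted from any one test.

	/* Before: per-test VCPU_ID constant, vCPU addressed as (vm, id). */
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;

	vm = vm_create_default(VCPU_ID, mempages, guest_code);
	run = vcpu_state(vm, VCPU_ID);
	vcpu_run(vm, VCPU_ID);
	get_ucall(vm, VCPU_ID, &uc);

	/*
	 * After: the vCPU object comes back from VM creation and carries its
	 * own id and kvm_run mapping, so no global ID is needed.
	 */
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct ucall uc;

	vm = __vm_create_with_one_vcpu(&vcpu, mempages, guest_code);
	run = vcpu->run;
	vcpu_run(vm, vcpu->id);
	get_ucall(vm, vcpu->id, &uc);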