Convert dirty_log_test to pass around a 'struct kvm_vcpu' object instead
of using a global VCPU_ID.  Note, this is a "functional" change in the
sense that the test now creates a vCPU with vcpu_id==0 instead of
vcpu_id==1.  The non-zero VCPU_ID was 100% arbitrary and added little to
no validation coverage.  If testing non-zero vCPU IDs is desirable for
generic tests, that can be done in the future by tweaking the VM creation
helpers.  The test still hardcodes usage of vcpu_id==0, but only for a
few lines.  That wart will be removed in the not-too-distant future.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 tools/testing/selftests/kvm/dirty_log_test.c | 59 ++++++++++----------
 1 file changed, 30 insertions(+), 29 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 66ee00b45847..ff3bd4c941a4 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -23,8 +23,6 @@
 #include "guest_modes.h"
 #include "processor.h"
 
-#define VCPU_ID				1
-
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX		1
 
@@ -229,17 +227,17 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
 	vm_enable_cap(vm, &cap);
 }
 
-static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					   void *bitmap, uint32_t num_pages)
 {
-	kvm_vm_get_dirty_log(vm, slot, bitmap);
+	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
 }
 
-static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					  void *bitmap, uint32_t num_pages)
 {
-	kvm_vm_get_dirty_log(vm, slot, bitmap);
-	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
 }
 
 /* Should only be called after a GUEST_SYNC */
@@ -253,14 +251,14 @@ static void vcpu_handle_sync_stop(void)
 	}
 }
 
-static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 {
-	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+	struct kvm_run *run = vcpu->run;
 
 	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
 		    "vcpu run failed: errno=%d", err);
 
-	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+	TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC,
 		    "Invalid guest sync status: exit_reason=%s\n",
 		    exit_reason_str(run->exit_reason));
 
@@ -331,7 +329,7 @@ static void dirty_ring_continue_vcpu(void)
 	sem_post(&sem_vcpu_cont);
 }
 
-static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					   void *bitmap, uint32_t num_pages)
 {
 	/* We only have one vcpu */
@@ -351,10 +349,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
 	}
 
 	/* Only have one vcpu */
-	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
+	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu->vm, vcpu->id),
 				       slot, bitmap, num_pages, &fetch_index);
 
-	cleared = kvm_vm_reset_dirty_ring(vm);
+	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
 
 	/* Cleared pages should be the same as collected */
 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
@@ -369,12 +367,12 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
 	pr_info("Iteration %ld collected %u pages\n", iteration, count);
 }
 
-static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 {
-	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+	struct kvm_run *run = vcpu->run;
 
 	/* A ucall-sync or ring-full event is allowed */
-	if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
+	if (get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC) {
 		/* We should allow this to continue */
 		;
 	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
@@ -408,10 +406,10 @@ struct log_mode {
 	/* Hook when the vm creation is done (before vcpu creation) */
 	void (*create_vm_done)(struct kvm_vm *vm);
 	/* Hook to collect the dirty pages into the bitmap provided */
-	void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
+	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
 				     void *bitmap, uint32_t num_pages);
 	/* Hook to call when after each vcpu run */
-	void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
+	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
 	void (*before_vcpu_join) (void);
 } log_modes[LOG_MODE_NUM] = {
 	{
@@ -473,22 +471,22 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
 		mode->create_vm_done(vm);
 }
 
-static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					 void *bitmap, uint32_t num_pages)
 {
 	struct log_mode *mode = &log_modes[host_log_mode];
 
 	TEST_ASSERT(mode->collect_dirty_pages != NULL,
 		    "collect_dirty_pages() is required for any log mode!");
-	mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
+	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
 }
 
-static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 {
 	struct log_mode *mode = &log_modes[host_log_mode];
 
 	if (mode->after_vcpu_run)
-		mode->after_vcpu_run(vm, ret, err);
+		mode->after_vcpu_run(vcpu, ret, err);
 }
 
 static void log_mode_before_vcpu_join(void)
@@ -510,7 +508,8 @@ static void generate_random_array(uint64_t *guest_array, uint64_t size)
 static void *vcpu_worker(void *data)
 {
 	int ret;
-	struct kvm_vm *vm = data;
+	struct kvm_vcpu *vcpu = data;
+	struct kvm_vm *vm = vcpu->vm;
 	uint64_t *guest_array;
 	uint64_t pages_count = 0;
 	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
@@ -525,7 +524,7 @@ static void *vcpu_worker(void *data)
 	sigmask->len = 8;
 	pthread_sigmask(0, NULL, sigset);
 	sigdelset(sigset, SIG_IPI);
-	vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
+	vcpu_ioctl(vm, vcpu->id, KVM_SET_SIGNAL_MASK, sigmask);
 
 	sigemptyset(sigset);
 	sigaddset(sigset, SIG_IPI);
@@ -537,13 +536,13 @@ static void *vcpu_worker(void *data)
 		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 		pages_count += TEST_PAGES_PER_LOOP;
 		/* Let the guest dirty the random pages */
-		ret = __vcpu_run(vm, VCPU_ID);
+		ret = __vcpu_run(vm, vcpu->id);
 		if (ret == -1 && errno == EINTR) {
 			int sig = -1;
 			sigwait(sigset, &sig);
 			assert(sig == SIG_IPI);
 		}
-		log_mode_after_vcpu_run(vm, ret, errno);
+		log_mode_after_vcpu_run(vcpu, ret, errno);
 	}
 
 	pr_info("Dirtied %"PRIu64" pages\n", pages_count);
@@ -696,6 +695,7 @@ struct test_params {
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
 	struct test_params *p = arg;
+	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	unsigned long *bmap;
 
@@ -713,9 +713,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	 * (e.g., 64K page size guest will need even less memory for
 	 * page tables).
 	 */
-	vm = create_vm(mode, VCPU_ID,
+	vm = create_vm(mode, 0,
 		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
+	vcpu = vcpu_get(vm, 0);
 
 	guest_page_size = vm_get_page_size(vm);
 	/*
@@ -776,12 +777,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	host_clear_count = 0;
 	host_track_next_count = 0;
 
-	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
+	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
 	while (iteration < p->iterations) {
 		/* Give the vcpu thread some time to dirty some pages */
 		usleep(p->interval * 1000);
-		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
 					     bmap, host_num_pages);
 
 		/*
-- 
2.36.0.464.gb9c8b46e94-goog
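
For readers not steeped in the selftests rework, the pattern throughout the
patch is simply "pass the vCPU object and derive everything else from it":
vcpu->vm replaces the separate VM parameter, vcpu->id replaces the global
VCPU_ID, and vcpu->run replaces vcpu_state().  The stand-alone sketch below
illustrates that calling-convention change with mock types; it is not the
real kvm_util API, only the shape of the conversion.

	/* Illustrative sketch only; mock stand-ins, not the kvm selftest types. */
	#include <stdio.h>

	struct mock_vm {
		int fd;				/* stand-in for the VM state */
	};

	struct mock_vcpu {
		struct mock_vm *vm;		/* owning VM, so callers need not pass it */
		int id;				/* per-vCPU ID, replaces a global VCPU_ID */
	};

	/* Old shape: takes the VM and relies on a hard-coded global vCPU ID. */
	#define VCPU_ID 1
	static void hook_old(struct mock_vm *vm, int slot)
	{
		printf("old: vm fd %d, slot %d, vcpu %d\n", vm->fd, slot, VCPU_ID);
	}

	/* New shape: takes the vCPU and derives the VM and ID from it. */
	static void hook_new(struct mock_vcpu *vcpu, int slot)
	{
		printf("new: vm fd %d, slot %d, vcpu %d\n", vcpu->vm->fd, slot, vcpu->id);
	}

	int main(void)
	{
		struct mock_vm vm = { .fd = 3 };
		struct mock_vcpu vcpu = { .vm = &vm, .id = 0 };

		hook_old(&vm, 1);
		hook_new(&vcpu, 1);
		return 0;
	}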