On 27/05/19 14:30, Andrew Jones wrote:
> Make sure we complete the I/O after determining we have a ucall,
> which is I/O. Also allow the *uc parameter to optionally be NULL.
> It's quite possible that a test case will only care about the
> return value, like for example when looping on a check for
> UCALL_DONE.
> 
> Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
> Reviewed-by: Peter Xu <peterx@xxxxxxxxxx>
> ---
> v2:
>  - rebase; there was a change to get_ucall() that affected
>    context.
>  - Also switch all unit tests to using a NULL uc if possible.
>    It was only possible for one though. Some unit tests only
>    use uc.cmd in error messages, but I guess that's a good
>    enough reason to have a non-NULL uc.
>  - add Peter's r-b
> 
>  tools/testing/selftests/kvm/dirty_log_test.c |  3 +--
>  tools/testing/selftests/kvm/lib/ucall.c      | 19 +++++++++++++------
>  2 files changed, 14 insertions(+), 8 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
> index fc27f890155b..ceb52b952637 100644
> --- a/tools/testing/selftests/kvm/dirty_log_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_test.c
> @@ -121,7 +121,6 @@ static void *vcpu_worker(void *data)
>  	uint64_t *guest_array;
>  	uint64_t pages_count = 0;
>  	struct kvm_run *run;
> -	struct ucall uc;
>  
>  	run = vcpu_state(vm, VCPU_ID);
>  
> @@ -132,7 +131,7 @@ static void *vcpu_worker(void *data)
>  		/* Let the guest dirty the random pages */
>  		ret = _vcpu_run(vm, VCPU_ID);
>  		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
> -		if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
> +		if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
>  			pages_count += TEST_PAGES_PER_LOOP;
>  			generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
>  		} else {
> diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
> index b701a01cfcb6..dd9a66700f96 100644
> --- a/tools/testing/selftests/kvm/lib/ucall.c
> +++ b/tools/testing/selftests/kvm/lib/ucall.c
> @@ -125,16 +125,16 @@ void ucall(uint64_t cmd, int nargs, ...)
>  uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
>  {
>  	struct kvm_run *run = vcpu_state(vm, vcpu_id);
> -
> -	memset(uc, 0, sizeof(*uc));
> +	struct ucall ucall = {};
> +	bool got_ucall = false;
>  
>  #ifdef __x86_64__
>  	if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
>  	    run->io.port == UCALL_PIO_PORT) {
>  		struct kvm_regs regs;
>  		vcpu_regs_get(vm, vcpu_id, &regs);
> -		memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc));
> -		return uc->cmd;
> +		memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(ucall));
> +		got_ucall = true;
>  	}
>  #endif
>  	if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
> @@ -143,8 +143,15 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
>  		TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
>  			    "Unexpected ucall exit mmio address access");
>  		memcpy(&gva, run->mmio.data, sizeof(gva));
> -		memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
> +		memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
> +		got_ucall = true;
> +	}
> +
> +	if (got_ucall) {
> +		vcpu_run_complete_io(vm, vcpu_id);
> +		if (uc)
> +			memcpy(uc, &ucall, sizeof(ucall));
>  	}
>  
> -	return uc->cmd;
> +	return ucall.cmd;
>  }
> 

Queued, thanks.

Paolo
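
(For illustration only, not part of the patch: the commit message mentions a
caller that only cares about the return value, e.g. looping on a check for
UCALL_DONE. With the NULL-uc form above, such a loop could look roughly like
the sketch below; the vm handle and VCPU_ID are assumed to come from the
surrounding selftest setup.)

	for (;;) {
		/* Run the vCPU until it exits back to userspace. */
		int ret = _vcpu_run(vm, VCPU_ID);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);

		/* No ucall payload is needed, so pass NULL and check only the command. */
		if (get_ucall(vm, VCPU_ID, NULL) == UCALL_DONE)
			break;
	}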