On Mon, Jun 10, 2024, Reinette Chatre wrote:
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index 8eb57de0b587..b473f210ba6c 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -23,6 +23,7 @@
>  
>  extern bool host_cpu_is_intel;
>  extern bool host_cpu_is_amd;
> +extern unsigned int tsc_khz;
>  
>  /* Forced emulation prefix, used to invoke the emulator unconditionally. */
>  #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
> @@ -815,6 +816,20 @@ static inline void cpu_relax(void)
>  	asm volatile("rep; nop" ::: "memory");
>  }
>  
> +static inline void udelay(unsigned long usec)

uint64_t instead of unsigned long?  Practically speaking it doesn't change
anything, but I don't see any reason to mix "unsigned long" and "uint64_t",
e.g. the max delay isn't a property of the address space.

> +{
> +	unsigned long cycles = tsc_khz / 1000 * usec;
> +	uint64_t start, now;
> +
> +	start = rdtsc();
> +	for (;;) {
> +		now = rdtsc();
> +		if (now - start >= cycles)
> +			break;
> +		cpu_relax();
> +	}
> +}
> +
>  #define ud2() \
>  	__asm__ __volatile__( \
>  		"ud2\n" \
> diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> index c664e446136b..ff579674032f 100644
> --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> @@ -25,6 +25,7 @@ vm_vaddr_t exception_handlers;
>  bool host_cpu_is_amd;
>  bool host_cpu_is_intel;
>  bool is_forced_emulation_enabled;
> +unsigned int tsc_khz;

Slight preference for uint32_t, mostly because KVM stores its version as a u32.

>  static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
>  {
> @@ -616,6 +617,8 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
>  
>  void kvm_arch_vm_post_create(struct kvm_vm *vm)
>  {
> +	int r;
> +
>  	vm_create_irqchip(vm);
>  	vm_init_descriptor_tables(vm);
>  
> @@ -628,6 +631,15 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
>  
>  		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
>  	}
> +
> +	if (kvm_has_cap(KVM_CAP_GET_TSC_KHZ)) {

I think we should make this a TEST_REQUIRE(), or maybe even a TEST_ASSERT().
Support for KVM_GET_TSC_KHZ predates KVM selftests by 7+ years.

> +		r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
> +		if (r < 0)

Heh, the docs are stale.  KVM hasn't returned an error since commit cc578287e322
("KVM: Infrastructure for software and hardware based TSC rate scaling"), which
again predates selftests by many years (6+ in this case).

To make our lives much simpler, I think we should assert that KVM_GET_TSC_KHZ
succeeds, and maybe throw in a GUEST_ASSERT(tsc_khz) in udelay()?  E.g. as is,
if KVM_GET_TSC_KHZ is allowed to fail, then we risk having to deal with weird
failures due to udelay() unexpectedly doing nothing.

> +			tsc_khz = 0;
> +		else
> +			tsc_khz = r;
> +		sync_global_to_guest(vm, tsc_khz);
> +	}
>  }
>  
>  void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
> --
> 2.34.1
> 
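
Completely untested, and the assert messages and exact placement are just
placeholders, but roughly the below is what I have in mind, folding in the
uint64_t/uint32_t comments as well:

  /* include/x86_64/processor.h */
  extern uint32_t tsc_khz;

  static inline void udelay(uint64_t usec)
  {
	uint64_t start, now, cycles;

	/* Catch a zero tsc_khz instead of silently not delaying at all. */
	GUEST_ASSERT(tsc_khz);
	cycles = tsc_khz / 1000 * usec;

	start = rdtsc();
	for (;;) {
		now = rdtsc();
		if (now - start >= cycles)
			break;
		cpu_relax();
	}
  }

  /* lib/x86_64/processor.c, in kvm_arch_vm_post_create() */
	int r;

	/*
	 * KVM_GET_TSC_KHZ has been supported, and hasn't been able to fail,
	 * since long before selftests existed, so simply assert.
	 */
	TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
		    "KVM_GET_TSC_KHZ should always be supported");
	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
	TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ should never fail");
	tsc_khz = r;
	sync_global_to_guest(vm, tsc_khz);

That keeps udelay() itself dirt simple, and any setup problem shows up as an
assert at VM creation instead of as a mysteriously short delay in guest code.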