On 1/11/2025 6:20 AM, Sean Christopherson wrote:
> Now that the binary stats cache infrastructure is largely scope agnostic,
> add support for vCPU-scoped stats.  Like VM stats, open and cache the
> stats FD when the vCPU is created so that it's guaranteed to be valid when
> vcpu_get_stats() is invoked.
>
> Account for the extra per-vCPU file descriptor in kvm_set_files_rlimit(),
> so that tests that create large VMs don't run afoul of resource limits.
>
> To sanity check that the infrastructure actually works, and to get a bit
> of bonus coverage, add an assert in x86's xapic_ipi_test to verify that
> the number of HLTs executed by the test matches the number of HLT exits
> observed by KVM.
>
> Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
> ---
>  .../testing/selftests/kvm/include/kvm_util.h | 20 +++++++-----
>  tools/testing/selftests/kvm/lib/kvm_util.c   | 32 ++++++++-----------
>  .../selftests/kvm/x86/xapic_ipi_test.c       |  2 ++
>  3 files changed, 27 insertions(+), 27 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
> index d4670b5962ab..373912464fb4 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util.h
> @@ -61,6 +61,7 @@ struct kvm_vcpu {
>  #ifdef __x86_64__
>  	struct kvm_cpuid2 *cpuid;
>  #endif
> +	struct kvm_binary_stats stats;
>  	struct kvm_dirty_gfn *dirty_gfns;
>  	uint32_t fetch_index;
>  	uint32_t dirty_gfns_count;
> @@ -534,17 +535,20 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
>  		    struct kvm_stats_desc *desc, uint64_t *data,
>  		    size_t max_elements);
>
> -void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
> -		   size_t max_elements);
> +void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
> +		  uint64_t *data, size_t max_elements);
>
> -#define vm_get_stat(vm, stat)				\
> -({							\
> -	uint64_t data;					\
> -							\
> -	__vm_get_stat(vm, #stat, &data, 1);		\
> -	data;						\
> +#define __get_stat(stats, stat)				\
> +({							\
> +	uint64_t data;					\
> +							\
> +	kvm_get_stat(stats, #stat, &data, 1);		\
> +	data;						\
>  })
>
> +#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
> +#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)
> +
>  void vm_create_irqchip(struct kvm_vm *vm);
>
>  static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index f49bb504fa72..b1c3c7260902 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -415,10 +415,11 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
>  void kvm_set_files_rlimit(uint32_t nr_vcpus)
>  {
>  	/*
> -	 * Number of file descriptors required, nr_vpucs vCPU fds + an arbitrary
> -	 * number for everything else.
> +	 * Each vCPU will open two file descriptors: the vCPU itself and the
> +	 * vCPU's binary stats file descriptor.  Add an arbitrary amount of
> +	 * buffer for all other files a test may open.
>  	 */
> -	int nr_fds_wanted = nr_vcpus + 100;
> +	int nr_fds_wanted = nr_vcpus * 2 + 100;
>  	struct rlimit rl;
>
>  	/*
> @@ -746,6 +747,8 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
>  	ret = close(vcpu->fd);
>  	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
>
> +	kvm_stats_release(&vcpu->stats);
> +
>  	list_del(&vcpu->list);
>
>  	vcpu_arch_free(vcpu);
> @@ -1339,6 +1342,11 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
>  	TEST_ASSERT(vcpu->run != MAP_FAILED,
>  		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
>
> +	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
> +		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
> +	else
> +		vcpu->stats.fd = -1;
> +
>  	/* Add to linked-list of VCPUs. */
>  	list_add(&vcpu->list, &vm->vcpus);
>
> @@ -2251,23 +2259,9 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
>  		    desc->name, size, ret);
>  }
>
> -/*
> - * Read the data of the named stat
> - *
> - * Input Args:
> - *   vm - the VM for which the stat should be read
> - *   stat_name - the name of the stat to read
> - *   max_elements - the maximum number of 8-byte values to read into data
> - *
> - * Output Args:
> - *   data - the buffer into which stat data should be read
> - *
> - * Read the data values of a specified stat from the binary stats interface.
> - */
> -void __vm_get_stat(struct kvm_vm *vm, const char *name, uint64_t *data,
> -		   size_t max_elements)
> +void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
> +		  uint64_t *data, size_t max_elements)
>  {
> -	struct kvm_binary_stats *stats = &vm->stats;
>  	struct kvm_stats_desc *desc;
>  	size_t size_desc;
>  	int i;
> diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
> index a76078a08ff8..574a944763b7 100644
> --- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
> +++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
> @@ -465,6 +465,8 @@ int main(int argc, char *argv[])
>  	cancel_join_vcpu_thread(threads[0], params[0].vcpu);
>  	cancel_join_vcpu_thread(threads[1], params[1].vcpu);
>
> +	TEST_ASSERT_EQ(data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
> +
>  	fprintf(stderr,
>  		"Test successful after running for %d seconds.\n"
>  		"Sending vCPU sent %lu IPIs to halting vCPU\n"

I have tested this infrastructure with xapic_ipi_test and the ipi hlt test [1]
on an AMD system.

Tested-by: Manali Shukla <Manali.Shukla@xxxxxxx>

[1]: https://lore.kernel.org/kvm/20250103081828.7060-1-manali.shukla@xxxxxxx/T/#mda361fc0892e6949d98de2a4a79f68fc362a2893

-Manali
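
For reference, below is a minimal sketch (not part of the patch, and not an
existing selftest) of how the new vcpu_get_stat() wrapper could be exercised
from a standalone test.  It assumes the standard selftest helpers
(vm_create_with_one_vcpu(), vcpu_run(), get_ucall(), kvm_vm_free()) and x86's
per-vCPU "exits" stat; the guest and assertions are illustrative only.

#include "kvm_util.h"
#include "test_util.h"
#include "ucall_common.h"

static void guest_code(void)
{
	/* A single ucall is enough to force at least one exit to userspace. */
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Without binary stats, __vm_vcpu_add() caches fd = -1; skip. */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

	/*
	 * The GUEST_DONE() ucall exited to userspace, so the per-vCPU "exits"
	 * stat must be non-zero.  vcpu_get_stat() reads it via the stats FD
	 * cached when the vCPU was created.
	 */
	TEST_ASSERT(vcpu_get_stat(vcpu, exits) >= 1,
		    "Expected at least one vCPU exit");

	kvm_vm_free(vm);
	return 0;
}

The TEST_REQUIRE() mirrors the fallback in __vm_vcpu_add(): if
KVM_CAP_BINARY_STATS_FD is unavailable, the cached per-vCPU stats FD is -1
and stat reads cannot succeed.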