On Thu, Apr 20, 2023, Jinrong Liang wrote: > From: Jinrong Liang <cloudliang@xxxxxxxxxxx> > > From: Jinrong Liang <cloudliang@xxxxxxxxxxx> > > Add tests to cover that pmu_event_filter works as expected when > it's applied to fixed performance counters, even if there is none > fixed counter exists (e.g. Intel guest pmu version=1 or AMD guest). > > Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx> > --- > .../kvm/x86_64/pmu_event_filter_test.c | 109 ++++++++++++++++++ > 1 file changed, 109 insertions(+) > > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c > index a3d5c30ce914..0f54c53d7fff 100644 > --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c > +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c > @@ -31,6 +31,7 @@ > #define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1) > #define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAG_MASKED_EVENTS + 1) > #define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1) > +#define INTEL_PMC_IDX_FIXED 32 > > /* > * This is how the event selector and unit mask are stored in an AMD > @@ -817,6 +818,113 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) > } > } > > +static void intel_guest_run_fixed_counters(uint8_t fixed_ctr_idx) > +{ > + for (;;) { > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); > + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0); > + > + /* Only OS_EN bit is enabled for fixed counter[idx]. */ > + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx)); > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, > + BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx)); > + __asm__ __volatile__("loop ." 
: "+c"((int){NUM_BRANCHES})); > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); > + > + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx)); > + } > +} > + > +static struct kvm_vcpu *new_vcpu(void *guest_code) > +{ > + struct kvm_vm *vm; > + struct kvm_vcpu *vcpu; > + > + vm = vm_create_with_one_vcpu(&vcpu, guest_code); > + vm_init_descriptor_tables(vm); > + vcpu_init_descriptor_tables(vcpu); Unnecessary copy+paste, this test doesn't set up a #GP handler. > + > + return vcpu; > +} > + > +static void free_vcpu(struct kvm_vcpu *vcpu) > +{ > + kvm_vm_free(vcpu->vm); > +} > + > +static uint64_t test_fixed_ctr_without_filter(struct kvm_vcpu *vcpu) > +{ > + return run_vcpu_to_sync(vcpu); > +} Please don't add wrappers that are single-line passthroughs. > +static const uint32_t actions[] = { > + KVM_PMU_EVENT_ALLOW, > + KVM_PMU_EVENT_DENY, > +}; (a) don't define global variables with super common names (this test sets a bad precedent). (b) this array is used in *one* function, i.e. it can be a local variable. (c) using an array doesn't save you code and just obfuscates what's happening. > +static uint64_t test_fixed_ctr_with_filter(struct kvm_vcpu *vcpu, Don't abbreviate "counter", there's really no need and "ctr" versus "ctrl" is already confusing enough. > + uint32_t action, > + uint32_t bitmap) > +{ > + struct kvm_pmu_event_filter *f; > + uint64_t r; > + > + f = create_pmu_event_filter(0, 0, action, 0, bitmap); > + r = test_with_filter(vcpu, f); > + free(f); > + return r; > +} > + > +static bool fixed_ctr_is_allowed(uint8_t idx, uint32_t action, uint32_t bitmap) > +{ > + return (action == KVM_PMU_EVENT_ALLOW && (bitmap & BIT_ULL(idx))) || > + (action == KVM_PMU_EVENT_DENY && !(bitmap & BIT_ULL(idx))); This helper shouldn't exist. It's largely a symptom of using an array. 
> +} > + > +static void test_fixed_ctr_action_and_bitmap(struct kvm_vcpu *vcpu, > + uint8_t fixed_ctr_idx, > + uint8_t max_fixed_num) > +{ > + uint8_t i; > + uint32_t bitmap; > + uint64_t count; > + bool expected; > + > + /* > + * Check the fixed performance counter can count normally works when > + * KVM userspace doesn't set any pmu filter. > + */ > + TEST_ASSERT(max_fixed_num && test_fixed_ctr_without_filter(vcpu), > + "Fixed counter does not exist or does not work as expected."); > + > + for (i = 0; i < ARRAY_SIZE(actions); i++) { > + for (bitmap = 0; bitmap < BIT_ULL(max_fixed_num); bitmap++) { You're comparing a 32-bit value against a 64-bit value. > + expected = fixed_ctr_is_allowed(fixed_ctr_idx, actions[i], bitmap); > + count = test_fixed_ctr_with_filter(vcpu, actions[i], bitmap); > + > + TEST_ASSERT(expected == !!count, > + "Fixed event filter does not work as expected."); > + } > + } static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, uint32_t action, uint32_t bitmap) { ... } static void __test_fixed_counter_bitmap(...) { uint32_t bitmap; TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * BITS_PER_BYTE); for (bitmap = 0; bitmap < BIT(nr_fixed_counters); bitmap++) { count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW, bitmap); TEST_ASSERT(!!count == !!(bitmap & BIT(idx))); count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY, bitmap); TEST_ASSERT(!!count == !(bitmap & BIT(idx))); } }
> + */ > + for (idx = 0; idx < max_fixed_num; idx++) { > + vcpu = new_vcpu(intel_guest_run_fixed_counters); > + vcpu_args_set(vcpu, 1, idx); > + test_fixed_ctr_action_and_bitmap(vcpu, idx, max_fixed_num); > + free_vcpu(vcpu); > + } > +} > + > int main(int argc, char *argv[]) > { > void (*guest_code)(void); > @@ -860,6 +968,7 @@ int main(int argc, char *argv[]) > kvm_vm_free(vm); > > test_pmu_config_disable(guest_code); > + test_fixed_counter_bitmap(); > > return 0; > } > -- > 2.31.1 >