Re: [PATCH v3 3/5] selftests: kvm/x86: Add testing for masked events

On Sat, Jul 09, 2022, Aaron Lewis wrote:
> Add testing for the pmu event filter's masked events.  These tests run
> through different ways of finding an event the guest is attempting to
> program in an event list.  For any given eventsel, there may be
> multiple instances of it in an event list.  These tests try different
> ways of looking up a match to force the matching algorithm to walk the
> relevant eventsels and ensure it is able to a) find a match, and b) stay
> within its bounds.
> 
> Signed-off-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
> ---
>  .../kvm/x86_64/pmu_event_filter_test.c        | 99 +++++++++++++++++++
>  1 file changed, 99 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 4bff4c71ac45..29abe9c88f4f 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -18,8 +18,12 @@
>  /*
>   * In lieu of copying perf_event.h into tools...
>   */
> +#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
>  #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
>  #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
> +#define AMD64_EVENTSEL_EVENT	\
> +	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
> +
>  
>  union cpuid10_eax {
>  	struct {
> @@ -445,6 +449,99 @@ static bool use_amd_pmu(void)
>  		 is_zen3(entry->eax));
>  }
>  
> +#define ENCODE_MASKED_EVENT(select, mask, match, invert) \
> +		KVM_PMU_EVENT_ENCODE_MASKED_EVENT(select, mask, match, invert)

Eww.  Use a helper.  Defining a passthrough macro just to get a shorter name is
silly.  AFAICT all usages pass ~0ull / ~0x00 for the mask, and a helper would let
the test do its own type checking, e.g. @invert can/should be a boolean.
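
E.g. something like this (completely untested, and assuming the uAPI macro
keeps its v3 name/signature):

	static uint64_t encode_masked_event(uint64_t select, uint64_t match,
					    bool invert)
	{
		/* Every caller passes an all-ones mask, so bake it in. */
		return KVM_PMU_EVENT_ENCODE_MASKED_EVENT(select, ~0ull, match,
							 invert);
	}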

> +
> +static void expect_success(uint64_t count)
> +{
> +	if (count != NUM_BRANCHES)
> +		pr_info("masked filter: Branch instructions retired = %lu (expected %u)\n",
> +			count, NUM_BRANCHES);

If the number of branches is expected to be precise, then assert on that.  If not,
then I don't see much value in printing anything.
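
E.g. if the count really is expected to be exact:

	TEST_ASSERT(count == NUM_BRANCHES,
		    "Allowed PMU event isn't counting, got %lu, expected %u",
		    count, NUM_BRANCHES);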

> +	TEST_ASSERT(count, "Allowed PMU event is not counting");
> +}
> +
> +static void expect_failure(uint64_t count)
> +{
> +	if (count)
> +		pr_info("masked filter: Branch instructions retired = %lu (expected 0)\n",
> +			count);
> +	TEST_ASSERT(!count, "Disallowed PMU Event is counting");

Print the debug information in the assert.
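
I.e. fold the pr_info() into the TEST_ASSERT() message, e.g.:

	TEST_ASSERT(!count,
		    "Disallowed PMU event is counting, branches retired = %lu",
		    count);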

> +}
> +
> +static void run_masked_events_test(struct kvm_vm *vm, uint64_t masked_events[],
> +				   const int nmasked_events, uint64_t event,
> +				   uint32_t action,
> +				   void (*expected_func)(uint64_t))

A function callback is overkill and unnecessary obfuscation.  And "expect_failure"
is misleading; the test isn't expected to "fail", rather the event is expected to
not count.

Actually, the callback is completely unnecessary, the caller already passes
ALLOW vs. DENY via @action.
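
I.e. (untested) drop the callback and key off of @action, e.g.:

	static void run_masked_events_test(struct kvm_vm *vm,
					   uint64_t masked_events[],
					   const int nmasked_events,
					   uint64_t event, uint32_t action)

and then do the ALLOW vs. DENY check on @action directly (see below).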

> +{
> +	struct kvm_pmu_event_filter *f;
> +	uint64_t old_event;
> +	uint64_t count;
> +	int i;
> +
> +	for (i = 0; i < nmasked_events; i++) {
> +		if ((masked_events[i] & AMD64_EVENTSEL_EVENT) != EVENT(event, 0))
> +			continue;
> +
> +		old_event = masked_events[i];
> +
> +		masked_events[i] =
> +			ENCODE_MASKED_EVENT(event, ~0x00, 0x00, 0);

Why double zeros?  And this easily fits on a single line.

		masked_events[i] = ENCODE_MASKED_EVENT(event, ~0ull, 0, 0);

> +
> +		f = create_pmu_event_filter(masked_events, nmasked_events, action,
> +				   KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> +
> +		count = test_with_filter(vm, f);
> +		free(f);
> +

As alluded to above...

		if (action == KVM_PMU_EVENT_ALLOW)
			TEST_ASSERT(count, <here be verbose, helpful output>);
		else
			TEST_ASSERT(!count, <more verbose, helpful output>);

> +		expected_func(count);
> +
> +		masked_events[i] = old_event;
> +	}
> +}
> +
> +static void run_masked_events_tests(struct kvm_vm *vm, uint64_t masked_events[],
> +				    const int nmasked_events, uint64_t event)
> +{
> +	run_masked_events_test(vm, masked_events, nmasked_events, event,
> +			       KVM_PMU_EVENT_ALLOW, expect_success);
> +	run_masked_events_test(vm, masked_events, nmasked_events, event,
> +			       KVM_PMU_EVENT_DENY, expect_failure);
> +}
> +
> +static void test_masked_events(struct kvm_vm *vm)
> +{
> +	uint64_t masked_events[11];

Why '11'?

> +	const int nmasked_events = ARRAY_SIZE(masked_events);
> +	uint64_t prev_event, event, next_event;
> +	int i;
> +
> +	if (use_intel_pmu()) {
> +		/* Instructions retired */
> +		prev_event = 0xc0;
> +		event = INTEL_BR_RETIRED;
> +		/* Branch misses retired */
> +		next_event = 0xc5;
> +	} else {
> +		TEST_ASSERT(use_amd_pmu(), "Unknown platform");
> +		/* Retired instructions */
> +		prev_event = 0xc0;
> +		event = AMD_ZEN_BR_RETIRED;
> +		/* Retired branch instructions mispredicted */
> +		next_event = 0xc3;
> +	}
> +
> +	for (i = 0; i < nmasked_events; i++)
> +		masked_events[i] =
> +			ENCODE_MASKED_EVENT(event, ~0x00, i+1, 0);
> +
> +	run_masked_events_tests(vm, masked_events, nmasked_events, event);
> +

Why run the test twice?  Hint, comment... ;-)
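
E.g. something along these lines; this is my guess at the intent, so fix it up
if I'm wrong:

	/*
	 * Run the tests a second time with the first two entries replaced by
	 * events for the adjacent (previous and next) event selects, to
	 * verify the matching algorithm skips over entries whose event
	 * select differs and stays within the target event's bounds.
	 */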

> +	masked_events[0] = ENCODE_MASKED_EVENT(prev_event, ~0x00, 0, 0);
> +	masked_events[1] = ENCODE_MASKED_EVENT(next_event, ~0x00, 0, 0);
> +
> +	run_masked_events_tests(vm, masked_events, nmasked_events, event);
> +}
> +
>  int main(int argc, char *argv[])
>  {
>  	void (*guest_code)(void) = NULL;
> @@ -489,6 +586,8 @@ int main(int argc, char *argv[])
>  	test_not_member_deny_list(vm);
>  	test_not_member_allow_list(vm);
>  
> +	test_masked_events(vm);
> +
>  	kvm_vm_free(vm);
>  
>  	test_pmu_config_disable(guest_code);
> -- 
> 2.37.0.144.g8ac04bfd2-goog
> 


