Re: [PATCH v3 03/11] KVM: selftests: Test Intel PMU architectural events on gp counters

On Mon, Aug 14, 2023, Jinrong Liang wrote:
> +static void test_arch_events_cpuid(struct kvm_vcpu *vcpu,
> +				   uint8_t arch_events_bitmap_size,
> +				   uint8_t arch_events_unavailable_mask,
> +				   uint8_t idx)
> +{
> +	uint64_t counter_val = 0;
> +	bool is_supported;
> +
> +	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
> +				arch_events_bitmap_size);
> +	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
> +				arch_events_unavailable_mask);
> +
> +	is_supported = arch_event_is_supported(vcpu, idx);
> +	vcpu_args_set(vcpu, 1, intel_arch_events[idx]);
> +
> +	while (run_vcpu(vcpu, &counter_val) != UCALL_DONE)
> +		TEST_ASSERT_EQ(is_supported, !!counter_val);
> +}
> +
> +static void intel_check_arch_event_is_unavl(uint8_t idx)
> +{
> +	uint8_t eax_evt_vec, ebx_unavl_mask, i, j;
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_vm *vm;
> +
> +	/*
> +	 * A brute force iteration of all combinations of values is likely to
> +	 * exhaust the limit of the single-threaded thread fd nums, so it's
> +	 * tested here by iterating through all valid values on a single bit.
> +	 */
> +	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
> +		eax_evt_vec = BIT_ULL(i);
> +		for (j = 0; j < ARRAY_SIZE(intel_arch_events); j++) {
> +			ebx_unavl_mask = BIT_ULL(j);
> +			vm = pmu_vm_create_with_one_vcpu(&vcpu,
> +							 guest_measure_loop);
> +			test_arch_events_cpuid(vcpu, eax_evt_vec,
> +					       ebx_unavl_mask, idx);
> +
> +			kvm_vm_free(vm);

This is messy.  If you're going to use a helper, then use the helper.  If not,
then open code everything.  Half and half just makes everything unnecessarily
hard to follow.  E.g. if you reorganize things and move even more checks into
the guest, I think you can end up with something like:


static void test_arch_events_cpuid(uint8_t i, uint8_t j, uint8_t idx)
{
	uint8_t eax_evt_vec = BIT_ULL(i);
	uint8_t ebx_unavl_mask = BIT_ULL(j);
	uint64_t counter_val = 0;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_measure_loop);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
				eax_evt_vec);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
				ebx_unavl_mask);

	vcpu_args_set(vcpu, 1, idx);

	run_vcpu(vcpu, &counter_val);

	kvm_vm_free(vm);
}

static void intel_check_arch_event_is_unavl(uint8_t idx)
{
	uint8_t i, j;

	/*
	 * A brute force iteration of all combinations of values is likely to
	 * exhaust the limit of the single-threaded thread fd nums, so it's
	 * tested here by iterating through all valid values on a single bit.
	 */
	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		for (j = 0; j < ARRAY_SIZE(intel_arch_events); j++)
			test_arch_events_cpuid(i, j, idx);
}
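
Side note on the "move even more checks into the guest" part: the guest-side
"is this event supported" check can be open coded from the same CPUID
properties, e.g. something like this rough sketch (the
guest_arch_event_is_supported() name and its use from guest_measure_loop()
are made up here, not taken from the patch):

static bool guest_arch_event_is_supported(uint8_t idx)
{
	/*
	 * An arch event is available if its index is within the advertised
	 * bit vector length (CPUID.0xA EAX[31:24]) and its bit is clear in
	 * the "unavailable events" mask (CPUID.0xA EBX).
	 */
	return this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH) > idx &&
	       !(this_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK) & BIT_ULL(idx));
}

The guest can then GUEST_ASSERT_EQ(guest_arch_event_is_supported(idx),
!!counter_val) after reading the counter, instead of handing the raw value
back to the host for the assert.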


