Re: [PATCH v5 1/7] kvm: x86/pmu: Correct the mask used in a pmu event filter lookup

On Tue, Sep 20, 2022, Aaron Lewis wrote:
> When checking if a pmu event the guest is attempting to program should
> be filtered, only consider the event select + unit mask in that
> decision. Use an architecture specific mask to mask out all other bits,
> including bits 35:32 on Intel.  Those bits are not part of the event
> select and should not be considered in that decision.
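
For anyone following along, the masks in play are (paraphrased from
arch/x86/include/asm/perf_event.h):

	#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
	#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
	#define AMD64_EVENTSEL_EVENT		(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
	#define AMD64_RAW_EVENT_MASK_NB		(AMD64_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK)

i.e. the current code keys the filter lookup on bits 35:32 even on Intel,
where those bits aren't part of the event select, so a guest that sets any
of them gets a key that never matches the filter.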
> 
> Fixes: 66bb8a065f5a ("KVM: x86: PMU Event Filter")
> Signed-off-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
> Reviewed-by: Jim Mattson <jmattson@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm-x86-pmu-ops.h |  1 +
>  arch/x86/kvm/pmu.c                     | 15 ++++++++++++++-
>  arch/x86/kvm/pmu.h                     |  1 +
>  arch/x86/kvm/svm/pmu.c                 |  6 ++++++
>  arch/x86/kvm/vmx/pmu_intel.c           |  6 ++++++
>  5 files changed, 28 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> index c17e3e96fc1d..e0280cc3e6e4 100644
> --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
> @@ -24,6 +24,7 @@ KVM_X86_PMU_OP(set_msr)
>  KVM_X86_PMU_OP(refresh)
>  KVM_X86_PMU_OP(init)
>  KVM_X86_PMU_OP(reset)
> +KVM_X86_PMU_OP(get_eventsel_event_mask)
>  KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
>  KVM_X86_PMU_OP_OPTIONAL(cleanup)
>  
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 02f9e4f245bd..98f383789579 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -247,6 +247,19 @@ static int cmp_u64(const void *pa, const void *pb)
>  	return (a > b) - (a < b);
>  }
>  
> +static inline u64 get_event_select(u64 eventsel)
> +{
> +	return eventsel & static_call(kvm_x86_pmu_get_eventsel_event_mask)();

There's no need to use a callback; both vendors' masks are compile-time constants.
And with a constant, this line becomes much shorter and this helper goes away
entirely.  More below.

> +}
> +
> +static inline u64 get_raw_event(u64 eventsel)

As a general rule, don't tag local static functions as "inline".  Unless something
_needs_ to be inlined, the compiler is almost always going to be smarter, and if
something absolutely must be inlined, then __always_inline is required, as "inline"
is purely a hint.
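
E.g. a plain helper with inlining left to the compiler (illustrative sketch
only, not part of the patch):

	/* No "inline"; the compiler will inline this trivially anyway. */
	static u64 get_raw_event(u64 eventsel)
	{
		return eventsel & (ARCH_PERFMON_EVENTSEL_EVENT |
				   ARCH_PERFMON_EVENTSEL_UMASK);
	}

with __always_inline reserved for the rare cases where inlining is
functionally required, e.g. in noinstr code.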

> +{
> +	u64 event_select = get_event_select(eventsel);
> +	u64 unit_mask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;

And looking forward, I think it makes sense for kvm_pmu_ops to hold the entire
mask.  This code from patch 3 is unnecessarily complex, and bleeds vendor details
where they don't belong.  I'll follow up more in patches 3 and 4.

  static inline u16 get_event_select(u64 eventsel)
  {
        u64 e = eventsel &
                static_call(kvm_x86_pmu_get_eventsel_event_mask)();

        return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);
  }

> +
> +	return event_select | unit_mask;
> +}
> +
>  static bool check_pmu_event_filter(struct kvm_pmc *pmc)
>  {
>  	struct kvm_pmu_event_filter *filter;
> @@ -263,7 +276,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
>  		goto out;
>  
>  	if (pmc_is_gp(pmc)) {
> -		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
> +		key = get_raw_event(pmc->eventsel);

With the above suggestion, this is simply:

		key = pmc->eventsel & kvm_pmu_ops.EVENTSEL_MASK;

And the total patch is:

---
 arch/x86/kvm/pmu.c           | 2 +-
 arch/x86/kvm/pmu.h           | 2 ++
 arch/x86/kvm/svm/pmu.c       | 2 ++
 arch/x86/kvm/vmx/pmu_intel.c | 2 ++
 4 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index d9b9a0f0db17..d0e2c7eda65b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -273,7 +273,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 		goto out;
 
 	if (pmc_is_gp(pmc)) {
-		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+		key = pmc->eventsel & kvm_pmu_ops.EVENTSEL_MASK;
 		if (bsearch(&key, filter->events, filter->nevents,
 			    sizeof(__u64), cmp_u64))
 			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 5cc5721f260b..45a7dd24125d 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -40,6 +40,8 @@ struct kvm_pmu_ops {
 	void (*reset)(struct kvm_vcpu *vcpu);
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
 	void (*cleanup)(struct kvm_vcpu *vcpu);
+
+	const u64 EVENTSEL_MASK;
 };
 
 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index b68956299fa8..6ef7d1fcdbc2 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -228,4 +228,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.refresh = amd_pmu_refresh,
 	.init = amd_pmu_init,
 	.reset = amd_pmu_reset,
+	.EVENTSEL_MASK = AMD64_EVENTSEL_EVENT |
+			 ARCH_PERFMON_EVENTSEL_UMASK,
 };
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 25b70a85bef5..0a1c95b64ef1 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -811,4 +811,6 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.reset = intel_pmu_reset,
 	.deliver_pmi = intel_pmu_deliver_pmi,
 	.cleanup = intel_pmu_cleanup,
+	.EVENTSEL_MASK = ARCH_PERFMON_EVENTSEL_EVENT |
+			 ARCH_PERFMON_EVENTSEL_UMASK,
 };

base-commit: e18d6152ff0f41b7f01f9817372022df04e0d354
-- 



