On Tue, Apr 11, 2023 at 10:14:10PM -0700, Reiji Watanabe wrote:
> Uh, right, interrupts are not masked during those windows...
>
> What I am currently considering on this would be disabling
> IRQs while manipulating the register, and introducing a new flag
> to indicate whether the PMUSERENR for the guest EL0 is loaded,
> and having kvm_set_pmuserenr() check the new flag.
>
> The code would be something like below (local_irq_save/local_irq_restore
> needs to be excluded for NVHE though).
>
> What do you think ?

I'm happy with that; it doesn't change the arm_pmu side of the
interface and it looks good from a functional perspective.

I'll have to leave it to Marc and Oliver to say whether they're
happy with the KVM side.

Thanks,
Mark.

>
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -668,6 +668,8 @@ struct kvm_vcpu_arch {
>  /* Software step state is Active-pending */
>  #define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
>
> +/* PMUSERENR for the guest EL0 is on physical CPU */
> +#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
>
>  /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
>  #define vcpu_sve_pffr(vcpu)	(kern_hyp_va((vcpu)->arch.sve_state) +	\
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 6718731729fd..57e4f480874a 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -82,12 +82,19 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
>  	 */
>  	if (kvm_arm_support_pmu_v3()) {
>  		struct kvm_cpu_context *hctxt;
> +		unsigned long flags;
>
>  		write_sysreg(0, pmselr_el0);
>
>  		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
> +
> +		local_irq_save(flags);
> +
>  		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
>  		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
> +		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
> +
> +		local_irq_restore(flags);
>  	}
>
>  	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
> @@ -112,9 +119,16 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
>  	write_sysreg(0, hstr_el2);
>  	if (kvm_arm_support_pmu_v3()) {
>  		struct kvm_cpu_context *hctxt;
> +		unsigned long flags;
>
>  		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
> +
> +		local_irq_save(flags);
> +
>  		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
> +		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
> +
> +		local_irq_restore(flags);
>  	}
>
>  	if (cpus_have_final_cap(ARM64_SME)) {
> diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> index 40bb2cb13317..33cd8e1ecbd6 100644
> --- a/arch/arm64/kvm/pmu.c
> +++ b/arch/arm64/kvm/pmu.c
> @@ -221,8 +221,13 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
>  bool kvm_set_pmuserenr(u64 val)
>  {
>  	struct kvm_cpu_context *hctxt;
> +	struct kvm_vcpu *vcpu;
>
> -	if (!kvm_arm_support_pmu_v3() || !has_vhe() || !kvm_get_running_vcpu())
> +	if (!kvm_arm_support_pmu_v3() || !has_vhe())
> +		return false;
> +
> +	vcpu = kvm_get_running_vcpu();
> +	if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
>  		return false;
>
>  	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
> --
>
> Thank you,
> Reiji
> >
> >
> > Thanks,
> > Mark.
> >
> > > Suggested-by: Mark Rutland <mark.rutland@xxxxxxx>
> > > Suggested-by: Marc Zyngier <maz@xxxxxxxxxx>
> > > Fixes: 83a7a4d643d3 ("arm64: perf: Enable PMU counter userspace access for perf event")
> > > Signed-off-by: Reiji Watanabe <reijiw@xxxxxxxxxx>
> > > ---
> > >  arch/arm64/include/asm/kvm_host.h |  5 +++++
> > >  arch/arm64/kernel/perf_event.c    | 21 ++++++++++++++++++---
> > >  arch/arm64/kvm/pmu.c              | 20 ++++++++++++++++++++
> > >  3 files changed, 43 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > > index bcd774d74f34..22db2f885c17 100644
> > > --- a/arch/arm64/include/asm/kvm_host.h
> > > +++ b/arch/arm64/include/asm/kvm_host.h
> > > @@ -1028,9 +1028,14 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
> > >  #ifdef CONFIG_KVM
> > >  void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
> > >  void kvm_clr_pmu_events(u32 clr);
> > > +bool kvm_set_pmuserenr(u64 val);
> > >  #else
> > >  static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
> > >  static inline void kvm_clr_pmu_events(u32 clr) {}
> > > +static inline bool kvm_set_pmuserenr(u64 val)
> > > +{
> > > +	return false;
> > > +}
> > >  #endif
> > >
> > >  void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
> > > diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> > > index dde06c0f97f3..0fffe4c56c28 100644
> > > --- a/arch/arm64/kernel/perf_event.c
> > > +++ b/arch/arm64/kernel/perf_event.c
> > > @@ -741,9 +741,25 @@ static inline u32 armv8pmu_getreset_flags(void)
> > >  	return value;
> > >  }
> > >
> > > +static void update_pmuserenr(u64 val)
> > > +{
> > > +	lockdep_assert_irqs_disabled();
> > > +
> > > +	/*
> > > +	 * The current pmuserenr value might be the value for the guest.
> > > +	 * If that's the case, have KVM keep track of the register value
> > > +	 * for the host EL0 so that KVM can restore it before returning to
> > > +	 * the host EL0. Otherwise, update the register now.
> > > +	 */
> > > +	if (kvm_set_pmuserenr(val))
> > > +		return;
> > > +
> > > +	write_sysreg(val, pmuserenr_el0);
> > > +}
> > > +
> > >  static void armv8pmu_disable_user_access(void)
> > >  {
> > > -	write_sysreg(0, pmuserenr_el0);
> > > +	update_pmuserenr(0);
> > >  }
> > >
> > >  static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
> > > @@ -759,8 +775,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
> > >  			armv8pmu_write_evcntr(i, 0);
> > >  	}
> > >
> > > -	write_sysreg(0, pmuserenr_el0);
> > > -	write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
> > > +	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
> > >  }
> > >
> > >  static void armv8pmu_enable_event(struct perf_event *event)
> > > diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> > > index 7887133d15f0..40bb2cb13317 100644
> > > --- a/arch/arm64/kvm/pmu.c
> > > +++ b/arch/arm64/kvm/pmu.c
> > > @@ -209,3 +209,23 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
> > >  	kvm_vcpu_pmu_enable_el0(events_host);
> > >  	kvm_vcpu_pmu_disable_el0(events_guest);
> > >  }
> > > +
> > > +/*
> > > + * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on
> > > + * the pCPU where the vCPU is loaded, since PMUSERENR_EL0 is switched to
> > > + * the value for the guest on vcpu_load(). The value for the host EL0
> > > + * will be restored on vcpu_put(), before returning to the host EL0.
> > > + *
> > > + * Return true if KVM takes care of the register. Otherwise return false.
> > > + */
> > > +bool kvm_set_pmuserenr(u64 val)
> > > +{
> > > +	struct kvm_cpu_context *hctxt;
> > > +
> > > +	if (!kvm_arm_support_pmu_v3() || !has_vhe() || !kvm_get_running_vcpu())
> > > +		return false;
> > > +
> > > +	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
> > > +	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
> > > +	return true;
> > > +}
> > > --
> > > 2.40.0.577.gac1e443424-goog
> > >
> >
> >
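
For readers piecing the two halves of the thread together, below is a
minimal userspace model of the handoff the proposed fix implements. It
is an illustrative sketch, not kernel code: the system register and the
PMUSERENR_ON_CPU flag are modeled as plain C variables, the IRQ masking
that makes each entry/exit pair atomic is only noted in comments, and
the constants are simplified stand-ins for the ARMV8_PMU_USERENR_*
definitions.

/*
 * Illustrative model of the PMUSERENR_EL0 handoff discussed above.
 * Not kernel code: "pmuserenr_el0" is a plain variable standing in
 * for the system register.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long pmuserenr_el0;	/* stand-in for the sysreg */
static unsigned long host_pmuserenr;	/* KVM's copy of the host EL0 value */
static bool pmuserenr_on_cpu;		/* models the PMUSERENR_ON_CPU flag */

/* Models kvm_set_pmuserenr(): while the guest value is live on the
 * CPU, update KVM's saved host copy instead of the register itself. */
static bool kvm_set_pmuserenr(unsigned long val)
{
	if (!pmuserenr_on_cpu)
		return false;
	host_pmuserenr = val;
	return true;
}

/* Models the perf-side update_pmuserenr() wrapper from the patch. */
static void update_pmuserenr(unsigned long val)
{
	if (kvm_set_pmuserenr(val))
		return;			/* restored later on vcpu_put() */
	pmuserenr_el0 = val;		/* write_sysreg(val, pmuserenr_el0) */
}

/* Models __activate_traps_common(); in the patch this runs under
 * local_irq_save() so a perf interrupt can never observe the flag and
 * the register out of sync. */
static void vcpu_load_pmuserenr(void)
{
	host_pmuserenr = pmuserenr_el0;	/* save the host EL0 value */
	pmuserenr_el0 = 0xf;		/* ARMV8_PMU_USERENR_MASK */
	pmuserenr_on_cpu = true;
}

/* Models __deactivate_traps_common(), likewise IRQ-masked. */
static void vcpu_put_pmuserenr(void)
{
	pmuserenr_el0 = host_pmuserenr;	/* includes any deferred update */
	pmuserenr_on_cpu = false;
}

int main(void)
{
	vcpu_load_pmuserenr();
	/* Perf reconfigures EL0 access while the guest value is loaded.
	 * Without the flag, this write would clobber the guest value and
	 * then be lost when the stale host value is restored. */
	update_pmuserenr(0xc);		/* USERENR_ER | USERENR_CR */
	vcpu_put_pmuserenr();
	printf("host sees 0x%lx after vcpu_put\n", pmuserenr_el0); /* 0xc */
	return 0;
}

The point the model makes explicit is why kvm_set_pmuserenr() must
check the flag rather than just kvm_get_running_vcpu(): between
vcpu_load() and __activate_traps_common(), a running vCPU exists but
the register still holds the host value, so a deferred update would
silently be dropped.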