On Mon, 09 May 2022 10:54:59 +0100,
Fuad Tabba <tabba@xxxxxxxxxx> wrote:
>
> Instead of the host accessing hyp data directly, pass the pmu
> events of the current cpu to hyp via the vcpu.
>
> This adds 64 bits (in two fields) to the vcpu that need to be
> synced before every vcpu run in nvhe and protected modes.
> However, it isolates the hypervisor from the host, which allows
> us to use pmu in protected mode in a subsequent patch.
>
> No functional change intended.
>
> Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
> ---
>  arch/arm64/include/asm/kvm_host.h |  8 ++------
>  arch/arm64/kvm/hyp/nvhe/switch.c  | 20 ++++++--------------
>  arch/arm64/kvm/pmu-emul.c         |  3 +++
>  arch/arm64/kvm/pmu.c              | 12 ++++--------
>  include/kvm/arm_pmu.h             |  6 ++++++
>  5 files changed, 21 insertions(+), 28 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index dfd360404dd8..90476e713643 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -273,14 +273,8 @@ struct kvm_cpu_context {
>  	struct kvm_vcpu *__hyp_running_vcpu;
>  };
>
> -struct kvm_pmu_events {
> -	u32 events_host;
> -	u32 events_guest;
> -};
> -
>  struct kvm_host_data {
>  	struct kvm_cpu_context host_ctxt;
> -	struct kvm_pmu_events pmu_events;
>  };
>
>  struct kvm_host_psci_config {
> @@ -763,6 +757,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
>  struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
>
>  DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
> +DECLARE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
>
>  static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
>  {
> @@ -821,6 +816,7 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
>  void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
>  void kvm_clr_pmu_events(u32 clr);
>
> +struct kvm_pmu_events *kvm_get_pmu_events(void);
>  void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
>  void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
>  #else
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index 0716163313d6..c61120ec8d1a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -153,13 +153,9 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
>  /*
>   * Disable host events, enable guest events
>   */
> -static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
> +static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_host_data *host;
> -	struct kvm_pmu_events *pmu;
> -
> -	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> -	pmu = &host->pmu_events;
> +	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
>
>  	if (pmu->events_host)
>  		write_sysreg(pmu->events_host, pmcntenclr_el0);
> @@ -173,13 +169,9 @@ static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
>  /*
>   * Disable guest events, enable host events
>   */
> -static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
> +static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_host_data *host;
> -	struct kvm_pmu_events *pmu;
> -
> -	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> -	pmu = &host->pmu_events;
> +	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
>
>  	if (pmu->events_guest)
>  		write_sysreg(pmu->events_guest, pmcntenclr_el0);
> @@ -304,7 +296,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
>  	host_ctxt->__hyp_running_vcpu = vcpu;
>  	guest_ctxt = &vcpu->arch.ctxt;
>
> -	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
> +	pmu_switch_needed = __pmu_switch_to_guest(vcpu);
>
>  	__sysreg_save_state_nvhe(host_ctxt);
>  	/*
> @@ -366,7 +358,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
>  	__debug_restore_host_buffers_nvhe(vcpu);
>
>  	if (pmu_switch_needed)
> -		__pmu_switch_to_host(host_ctxt);
> +		__pmu_switch_to_host(vcpu);
>
>  	/* Returning to host will clear PSR.I, remask PMR if needed */
>  	if (system_uses_irq_prio_masking())
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index 3dc990ac4f44..08d0551a4e43 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -406,6 +406,9 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
>  	if (!kvm_vcpu_has_pmu(vcpu))
>  		return;
>
> +	if (!has_vhe())
> +		pmu->events = *kvm_get_pmu_events();

A bit of context:

	preempt_disable();

	/*
	 * The VMID allocator only tracks active VMIDs per
	 * physical CPU, and therefore the VMID allocated may not be
	 * preserved on VMID roll-over if the task was preempted,
	 * making a thread's VMID inactive. So we need to call
	 * kvm_arm_vmid_update() in non-preemptible context.
	 */
	kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);

	kvm_pmu_flush_hwstate(vcpu);

	local_irq_disable();

You are *still* in a context where an interrupt can fire and mess
things up behind your back. Not good.

Also, this is now synchronised *twice* per run (once on flush, once
on sync). Do we really need this?

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.
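
To make the race concrete: kvm_pmu_flush_hwstate() reaches the
kvm_pmu_update_state() hunk quoted above, which copies the per-CPU
kvm_pmu_events into vcpu->arch.pmu.events, while the host side updates
that same per-CPU structure through kvm_set_pmu_events() and
kvm_clr_pmu_events(), which can run from interrupt context. Below is
an annotated version of the sequence from the context snippet — an
illustrative sketch of the ordering only, not actual kernel code:

	preempt_disable();

	kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);

	/* Copies the per-CPU kvm_pmu_events into vcpu->arch.pmu.events. */
	kvm_pmu_flush_hwstate(vcpu);

	/*
	 * Interrupts are still enabled here: a perf interrupt taken at
	 * this point can call kvm_set_pmu_events()/kvm_clr_pmu_events()
	 * and modify the per-CPU events, leaving the copy in the vcpu
	 * stale by the time the guest is entered.
	 */

	local_irq_disable();

One possible resolution, assuming the copy is only needed on the entry
path, would be to take it once after interrupts are masked; that would
address both the race and the duplicated flush/sync work.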