On Wed Mar 20, 2024 at 12:28 AM AEST, Gautam Menghani wrote:
> PAPR hypervisor has introduced three new counters in the VPA area of
> LPAR CPUs for KVM L2 guest (see [1] for terminology) observability - 2
> for context switches from host to guest and vice versa, and 1 counter
> for getting the total time spent inside the KVM guest. Add a tracepoint
> that enables reading the counters for use by ftrace/perf. Note that this
> tracepoint is only available for nestedv2 API (i.e, KVM on PowerVM).
>
> [1] Terminology:
> a. L1 refers to the VM (LPAR) booted on top of PAPR hypervisor
> b. L2 refers to the KVM guest booted on top of L1.
>
> Signed-off-by: Vaibhav Jain <vaibhav@xxxxxxxxxxxxx>
> Signed-off-by: Gautam Menghani <gautam@xxxxxxxxxxxxx>
> ---
>  arch/powerpc/include/asm/kvm_host.h |  5 +++++
>  arch/powerpc/include/asm/lppaca.h   | 11 ++++++++---
>  arch/powerpc/kvm/book3s_hv.c        | 20 ++++++++++++++++++++
>  arch/powerpc/kvm/trace_hv.h         | 24 ++++++++++++++++++++++++
>  4 files changed, 57 insertions(+), 3 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 8abac5321..26d7bb4b9 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -847,6 +847,11 @@ struct kvm_vcpu_arch {
>  	gpa_t nested_io_gpr;
>  	/* For nested APIv2 guests*/
>  	struct kvmhv_nestedv2_io nestedv2_io;
> +
> +	/* For VPA counters having context switch and guest run time info (in ns) */
> +	u64 l1_to_l2_cs;
> +	u64 l2_to_l1_cs;
> +	u64 l2_runtime;
>  #endif
>
>  #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

These aren't required here if it's just used for tracing over a single
run vcpu call are they?

> diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
> index 61ec2447d..bda6b86b9 100644
> --- a/arch/powerpc/include/asm/lppaca.h
> +++ b/arch/powerpc/include/asm/lppaca.h
> @@ -62,7 +62,8 @@ struct lppaca {
>  	u8 donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
>  	u8 fpregs_in_use;
>  	u8 pmcregs_in_use;
> -	u8 reserved8[28];
> +	u8 l2_accumul_cntrs_enable;	/* Enable usage of counters for KVM guest */
> +	u8 reserved8[27];
>  	__be64 wait_state_cycles;	/* Wait cycles for this proc */
>  	u8 reserved9[28];
>  	__be16 slb_count;		/* # of SLBs to maintain */
> @@ -92,9 +93,13 @@ struct lppaca {
>  	/* cacheline 4-5 */
>
>  	__be32 page_ins;		/* CMO Hint - # page ins by OS */
> -	u8 reserved12[148];
> +	u8 reserved12[28];
> +	volatile __be64 l1_to_l2_cs_tb;
> +	volatile __be64 l2_to_l1_cs_tb;
> +	volatile __be64 l2_runtime_tb;
> +	u8 reserved13[96];
>  	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
> -	u8 reserved13[96];
> +	u8 reserved14[96];
>  } ____cacheline_aligned;
>
>  #define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 2b04eba90..b94461b5f 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -4092,6 +4092,7 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
>  	unsigned long msr, i;
>  	int trap;
>  	long rc;
> +	struct lppaca *lp = get_lppaca();

Does get_lppaca() emit some inline asm that can't be optimised? Could
move it under the unlikely branches if so (rough sketch below).

>
>  	io = &vcpu->arch.nestedv2_io;
>

KVM L0 could in theory provide this for v1 L1s too, so could this be
done at a higher level to cover both?
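(On the get_lppaca() question above: if the access really can't be
optimised away, a minimal variant would be to only take the pointer
when the tracepoint is live - untested sketch:)

	if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled())) {
		struct lppaca *lp = get_lppaca();

		/* only touch the VPA counters when the tracepoint is active */
		lp->l2_accumul_cntrs_enable = 1;
	}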
> @@ -4107,6 +4108,17 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
>  	kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr);
>
>  	accumulate_time(vcpu, &vcpu->arch.in_guest);
> +
> +	/* Reset the guest host context switch timing */
> +	if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled())) {
> +		lp->l2_accumul_cntrs_enable = 1;
> +		lp->l1_to_l2_cs_tb = 0;
> +		lp->l2_to_l1_cs_tb = 0;
> +		lp->l2_runtime_tb = 0;
> +	} else {
> +		lp->l2_accumul_cntrs_enable = 0;
> +	}

Instead of zeroing here, zero after the exit, which avoids the else
branch and possibly avoids an obscure race with the counters. What if
trace_kvmppc_vcpu_exit_cs_time_enabled() is false here...

> +
>  	rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
>  				  &trap, &i);
>
> @@ -4133,6 +4145,14 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
>
>  	timer_rearm_host_dec(*tb);
>
> +	/* Record context switch and guest_run_time data */
> +	if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled())) {
> +		vcpu->arch.l1_to_l2_cs = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
> +		vcpu->arch.l2_to_l1_cs = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
> +		vcpu->arch.l2_runtime = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
> +		trace_kvmppc_vcpu_exit_cs_time(vcpu);
> +	}

... and true here. If it had been previously true then it would trace
stale values, I think?

Would something like this work?

	if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled()))
		get_lppaca()->l2_accumul_cntrs_enable = 1;

	[run vcpu ; ...]

	if (unlikely(trace_kvmppc_vcpu_exit_cs_time_enabled()))
		do_trace_nested_cs_time(vcpu);

...

static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
{
	struct lppaca *lp = get_lppaca();
	u64 l1_to_l2, l2_to_l1, l2_runtime;

	if (!lp->l2_accumul_cntrs_enable)
		return;

	l1_to_l2 = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
	l2_to_l1 = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
	l2_runtime = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));

	trace_kvmppc_vcpu_exit_cs_time(vcpu->vcpu_id, l1_to_l2, l2_to_l1,
				       l2_runtime);

	lp->l1_to_l2_cs_tb = 0;
	lp->l2_to_l1_cs_tb = 0;
	lp->l2_runtime_tb = 0;
	lp->l2_accumul_cntrs_enable = 0;
}
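For reference, the trace_hv.h side would then take the computed values
rather than the vcpu pointer; a rough, untested sketch (argument and
field names here are my guess, not the patch's) of how that TRACE_EVENT
could look:

TRACE_EVENT(kvmppc_vcpu_exit_cs_time,
	TP_PROTO(unsigned int vcpu_id, u64 l1_to_l2_cs, u64 l2_to_l1_cs,
		 u64 l2_runtime),

	TP_ARGS(vcpu_id, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),

	TP_STRUCT__entry(
		__field(unsigned int,	vcpu_id)
		__field(u64,		l1_to_l2_cs)
		__field(u64,		l2_to_l1_cs)
		__field(u64,		l2_runtime)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->l1_to_l2_cs = l1_to_l2_cs;
		__entry->l2_to_l1_cs = l2_to_l1_cs;
		__entry->l2_runtime = l2_runtime;
	),

	TP_printk("vcpu=%u l1_to_l2_cs_ns=%llu l2_to_l1_cs_ns=%llu l2_runtime_ns=%llu",
		  __entry->vcpu_id, __entry->l1_to_l2_cs,
		  __entry->l2_to_l1_cs, __entry->l2_runtime)
);

That also keeps the per-run values out of kvm_vcpu_arch entirely, per
the comment on the kvm_host.h hunk above.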