On Tue, Dec 24, 2024 at 6:58 AM Atish Kumar Patra <atishp@xxxxxxxxxxxx> wrote:
>
> On Mon, Dec 23, 2024 at 6:19 AM Anup Patel <anup@xxxxxxxxxxxxxx> wrote:
> >
> > On Fri, Dec 13, 2024 at 2:27 AM Atish Patra <atishp@xxxxxxxxxxxx> wrote:
> > >
> > > Currently, kvm doesn't delegate the few traps such as misaligned
> > > load/store, illegal instruction and load/store access faults because it
> > > is not expected to occur in the guest very frequent. Thus, kvm gets a
> > > chance to act upon it or collect statstics about it before redirecting
> > > the traps to the guest.
> > >
> > > We can collect both guest and host visible statistics during the traps.
> > > Enable them so that both guest and host can collect the stats about
> > > them if required.
> >
> > s/We can collect .../Collect .../
> >
>
> Let me know if I should send a v2. I noticed a couple of other typos
> in the commit text as well.

Yes, please send v2.

Regards,
Anup

> >
> > > Signed-off-by: Atish Patra <atishp@xxxxxxxxxxxx>
> >
> > Otherwise, it looks good to me.
> >
> > Reviewed-by: Anup Patel <anup@xxxxxxxxxxxxxx>
> >
> > Regards,
> > Anup
> >
> > > ---
> > >  arch/riscv/include/asm/kvm_host.h | 5 +++++
> > >  arch/riscv/kvm/vcpu.c             | 7 ++++++-
> > >  arch/riscv/kvm/vcpu_exit.c        | 5 +++++
> > >  3 files changed, 16 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> > > index 35eab6e0f4ae..cc33e35cd628 100644
> > > --- a/arch/riscv/include/asm/kvm_host.h
> > > +++ b/arch/riscv/include/asm/kvm_host.h
> > > @@ -87,6 +87,11 @@ struct kvm_vcpu_stat {
> > >         u64 csr_exit_kernel;
> > >         u64 signal_exits;
> > >         u64 exits;
> > > +       u64 instr_illegal_exits;
> > > +       u64 load_misaligned_exits;
> > > +       u64 store_misaligned_exits;
> > > +       u64 load_access_exits;
> > > +       u64 store_access_exits;
> > >  };
> > >
> > >  struct kvm_arch_memory_slot {
> > > diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> > > index e048dcc6e65e..60d684c76c58 100644
> > > --- a/arch/riscv/kvm/vcpu.c
> > > +++ b/arch/riscv/kvm/vcpu.c
> > > @@ -34,7 +34,12 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
> > >         STATS_DESC_COUNTER(VCPU, csr_exit_user),
> > >         STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
> > >         STATS_DESC_COUNTER(VCPU, signal_exits),
> > > -       STATS_DESC_COUNTER(VCPU, exits)
> > > +       STATS_DESC_COUNTER(VCPU, exits),
> > > +       STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
> > > +       STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
> > > +       STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
> > > +       STATS_DESC_COUNTER(VCPU, load_access_exits),
> > > +       STATS_DESC_COUNTER(VCPU, store_access_exits),
> > >  };
> > >
> > >  const struct kvm_stats_header kvm_vcpu_stats_header = {
> > > diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
> > > index acdcd619797e..6e0c18412795 100644
> > > --- a/arch/riscv/kvm/vcpu_exit.c
> > > +++ b/arch/riscv/kvm/vcpu_exit.c
> > > @@ -195,22 +195,27 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
> > >         switch (trap->scause) {
> > >         case EXC_INST_ILLEGAL:
> > >                 kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
> > > +               vcpu->stat.instr_illegal_exits++;
> > >                 ret = vcpu_redirect(vcpu, trap);
> > >                 break;
> > >         case EXC_LOAD_MISALIGNED:
> > >                 kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
> > > +               vcpu->stat.load_misaligned_exits++;
> > >                 ret = vcpu_redirect(vcpu, trap);
> > >                 break;
> > >         case EXC_STORE_MISALIGNED:
> > >                 kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
> > > +               vcpu->stat.store_misaligned_exits++;
> > >                 ret = vcpu_redirect(vcpu, trap);
> > >                 break;
> > >         case EXC_LOAD_ACCESS:
> > >                 kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
> > > +               vcpu->stat.load_access_exits++;
> > >                 ret = vcpu_redirect(vcpu, trap);
> > >                 break;
> > >         case EXC_STORE_ACCESS:
> > >                 kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
> > > +               vcpu->stat.store_access_exits++;
> > >                 ret = vcpu_redirect(vcpu, trap);
> > >                 break;
> > >         case EXC_INST_ACCESS:
> > >
> > >
> > > --
> > > 2.34.1
> > >
>
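
Since the new counters are declared with STATS_DESC_COUNTER in kvm_vcpu_stats_desc[], they are exposed through KVM's generic binary stats interface. Below is a minimal sketch (not part of the patch) of how a VMM could read one of them from userspace with the documented KVM_GET_STATS_FD ioctl; "vcpu_fd" and "read_vcpu_counter" are placeholder names, and the vCPU fd is assumed to have been obtained earlier via KVM_CREATE_VCPU:

/*
 * Minimal sketch: look up a named vCPU counter, e.g.
 * "instr_illegal_exits", via the KVM binary stats file descriptor.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static uint64_t read_vcpu_counter(int vcpu_fd, const char *name)
{
	struct kvm_stats_header hdr;
	struct kvm_stats_desc *desc;
	uint64_t val = 0;
	size_t desc_sz;
	int stats_fd;
	uint32_t i;

	/* One stats fd per vCPU; a header at offset 0 describes its layout. */
	stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
	if (stats_fd < 0 ||
	    pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr))
		exit(EXIT_FAILURE);

	/* Each descriptor carries a trailing name[] of name_size bytes. */
	desc_sz = sizeof(*desc) + hdr.name_size;
	desc = calloc(1, desc_sz);
	if (!desc)
		exit(EXIT_FAILURE);

	for (i = 0; i < hdr.num_desc; i++) {
		if (pread(stats_fd, desc, desc_sz,
			  hdr.desc_offset + (off_t)i * desc_sz) < 0)
			exit(EXIT_FAILURE);
		if (!strcmp(desc->name, name)) {
			/* A counter is a single u64 at data_offset + offset. */
			pread(stats_fd, &val, sizeof(val),
			      hdr.data_offset + desc->offset);
			break;
		}
	}

	free(desc);
	close(stats_fd);
	return val;
}

The same stats descriptors also back KVM's per-VM debugfs entries, so the new counters should additionally show up in tools such as kvm_stat without any further userspace changes.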