On Fri, Aug 05, 2022 at 02:58:08PM +0100, Marc Zyngier wrote:
> In order to reduce the boilerplate code, add two helpers returning
> the counter register index (resp. the event register) in the vcpu
> register file from the counter index.
> 
> Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>

Reviewed-by: Oliver Upton <oliver.upton@xxxxxxxxx>

> ---
>  arch/arm64/kvm/pmu-emul.c | 27 +++++++++++++++------------
>  1 file changed, 15 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index 0ab6f59f433c..9be485d23416 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -75,6 +75,16 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
>  	return container_of(vcpu_arch, struct kvm_vcpu, arch);
>  }
>  
> +static u32 counter_index_to_reg(u64 idx)
> +{
> +	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
> +}
> +
> +static u32 counter_index_to_evtreg(u64 idx)
> +{
> +	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
> +}
> +
>  /**
>   * kvm_pmu_get_counter_value - get PMU counter value
>   * @vcpu: The vcpu pointer
> @@ -89,8 +99,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
>  	if (!kvm_vcpu_has_pmu(vcpu))
>  		return 0;
>  
> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> -	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
> +	reg = counter_index_to_reg(select_idx);
>  	counter = __vcpu_sys_reg(vcpu, reg);
>  
>  	/*
> @@ -120,8 +129,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
>  	if (!kvm_vcpu_has_pmu(vcpu))
>  		return;
>  
> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> -	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
> +	reg = counter_index_to_reg(select_idx);
>  	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
>  
>  	/* Recreate the perf event to reflect the updated sample_period */
> @@ -156,10 +164,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
>  
>  	counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
>  
> -	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
> -		reg = PMCCNTR_EL0;
> -	else
> -		reg = PMEVCNTR0_EL0 + pmc->idx;
> +	reg = counter_index_to_reg(pmc->idx);
>  
>  	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
>  		counter = lower_32_bits(counter);
> @@ -540,8 +545,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
>  	struct perf_event_attr attr;
>  	u64 eventsel, counter, reg, data;
>  
> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> -	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
> +	reg = counter_index_to_evtreg(select_idx);
>  	data = __vcpu_sys_reg(vcpu, reg);
>  
>  	kvm_pmu_stop_counter(vcpu, pmc);
> @@ -627,8 +631,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
>  	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
>  	mask |= kvm_pmu_event_mask(vcpu->kvm);
>  
> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> -	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
> +	reg = counter_index_to_evtreg(select_idx);
>  
>  	__vcpu_sys_reg(vcpu, reg) = data & mask;
>  
> -- 
> 2.34.1
> 
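
For readers following the thread without the kernel tree handy, below is a
minimal, self-contained sketch of the index-to-register mapping the two new
helpers encode. The enum values are illustrative stand-ins (in the kernel
these are vcpu sysreg-file indices defined elsewhere); only
ARMV8_PMU_CYCLE_IDX == 31 matches the real definition.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the kernel's register-file indices. */
    #define ARMV8_PMU_CYCLE_IDX  31
    enum {
            PMEVCNTR0_EL0  = 100,   /* first of 31 consecutive counter slots */
            PMCCNTR_EL0    = 150,   /* dedicated cycle counter slot */
            PMEVTYPER0_EL0 = 200,   /* first of 31 consecutive evtype slots */
            PMCCFILTR_EL0  = 250,   /* dedicated cycle counter filter slot */
    };

    /* Event counters 0..30 map onto a contiguous run of registers;
     * the cycle counter (index 31) has its own dedicated registers. */
    static uint32_t counter_index_to_reg(uint64_t idx)
    {
            return (idx == ARMV8_PMU_CYCLE_IDX) ?
                    PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
    }

    static uint32_t counter_index_to_evtreg(uint64_t idx)
    {
            return (idx == ARMV8_PMU_CYCLE_IDX) ?
                    PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
    }

    int main(void)
    {
            printf("counter 0  -> reg %u, evtreg %u\n",
                   counter_index_to_reg(0), counter_index_to_evtreg(0));
            printf("counter 30 -> reg %u, evtreg %u\n",
                   counter_index_to_reg(30), counter_index_to_evtreg(30));
            printf("cycle ctr  -> reg %u, evtreg %u\n",
                   counter_index_to_reg(ARMV8_PMU_CYCLE_IDX),
                   counter_index_to_evtreg(ARMV8_PMU_CYCLE_IDX));
            return 0;
    }

The helpers rely on the PMEVCNTRn_EL0 (and likewise PMEVTYPERn_EL0) entries
occupying consecutive slots in the vcpu register file, so plain addition
selects the right register, with the cycle counter special-cased.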