The PMU architecture draws a subtle distinction between a 64bit counter
and a counter that has a 64bit overflow. This is, for example, the case
of the cycle counter, which can generate an overflow on a 32bit
boundary if PMCR_EL0.LC==0, despite the accumulation being done on 64
bits.

Use this distinction in the few cases where it matters in the code, as
we will reuse this with PMUv3p5 long counters.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/pmu-emul.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 4986e8b3ea6c..9040d3c80096 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -48,6 +48,11 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
  * @select_idx: The counter index
  */
 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	return (select_idx == ARMV8_PMU_CYCLE_IDX);
+}
+
+static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
@@ -55,7 +60,8 @@ static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
 
 static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
 {
-	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX);
+	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
+		!kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
 }
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
@@ -95,7 +101,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 		counter += perf_event_read_value(pmc->perf_event, &enabled,
 						 &running);
 
-	if (select_idx != ARMV8_PMU_CYCLE_IDX)
+	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
 		counter = lower_32_bits(counter);
 
 	return counter;
@@ -447,7 +453,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	 */
 	period = -(local64_read(&perf_event->count));
 
-	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+	if (!kvm_pmu_idx_has_64bit_overflow(vcpu, pmc->idx))
 		period &= GENMASK(31, 0);
 
 	local64_set(&perf_event->hw.period_left, 0);
@@ -577,7 +583,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
 
 	/* The initial sample period (overflow count) of an event. */
-	if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	if (kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
 		attr.sample_period = (-counter) & GENMASK(63, 0);
 	else
 		attr.sample_period = (-counter) & GENMASK(31, 0);
-- 
2.34.1
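
For reference, the counter/overflow split is easy to demonstrate outside
the kernel. Below is a minimal standalone C sketch (all names are made up
for illustration; this is not KVM code) of a counter that always
accumulates at 64 bits but signals overflow at either a 32bit or 64bit
boundary depending on an LC-style flag, mirroring the
kvm_pmu_idx_is_64bit()/kvm_pmu_idx_has_64bit_overflow() split above:

/* Standalone illustration (not kernel code): a counter can accumulate
 * at full 64-bit width while still signalling overflow at a 32-bit
 * boundary, which is the cycle-counter case when PMCR_EL0.LC == 0.
 * All names below are made up for this sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_counter {
	uint64_t value;		/* accumulation is always 64 bits wide */
	bool long_cycle;	/* models PMCR_EL0.LC for the cycle counter */
};

/* Add to the counter and report whether the overflow boundary wrapped.
 * Assumes delta < 2^32 for simplicity. */
static bool toy_add(struct toy_counter *c, uint64_t delta)
{
	uint64_t old = c->value;

	c->value += delta;	/* 64-bit accumulation regardless of LC */

	if (c->long_cycle)	/* 64-bit overflow: full value wrapped */
		return c->value < old;

	/* 32-bit overflow: the low 32 bits wrapped, even though the
	 * upper half of the counter keeps accumulating. */
	return (uint32_t)c->value < (uint32_t)old;
}

int main(void)
{
	struct toy_counter c = { .value = 0xfffffff0, .long_cycle = false };

	/* Crossing the 32-bit boundary raises an overflow when LC == 0,
	 * yet the accumulated value is still correct on 64 bits. */
	printf("overflow=%d value=0x%llx\n", toy_add(&c, 0x20),
	       (unsigned long long)c.value);

	c.long_cycle = true;	/* with LC == 1, only a bit-63 wrap counts */
	printf("overflow=%d value=0x%llx\n", toy_add(&c, 0x20),
	       (unsigned long long)c.value);
	return 0;
}

With long_cycle == false, the first call reports an overflow while the
stored value correctly reads 0x100000010; this is why the two predicates
cannot be collapsed into one.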