On Wed, May 22, 2019 at 12:50:43PM +0100, Marc Zyngier wrote:
> On 22/05/2019 11:35, Andrew Murray wrote:
> > On Tue, May 21, 2019 at 05:31:47PM +0100, Marc Zyngier wrote:
> >> On 21/05/2019 16:52, Andrew Murray wrote:
> >>> ARMv8 provides support for chained PMU counters: when an event type
> >>> of 0x001E is set for an odd-numbered counter, that counter will
> >>> increment by one for each overflow of the preceding even-numbered
> >>> counter. Let's emulate this in KVM by creating a 64 bit perf counter
> >>> when a user chains two emulated counters together.
> >>>
> >>> For chained events we only support generating an overflow interrupt
> >>> on the high counter. We use the attributes of the low counter to
> >>> determine the attributes of the perf event.
> >>>
> >>> Suggested-by: Marc Zyngier <marc.zyngier@xxxxxxx>
> >>> Signed-off-by: Andrew Murray <andrew.murray@xxxxxxx>
> >>> ---
> >>>  include/kvm/arm_pmu.h |   2 +
> >>>  virt/kvm/arm/pmu.c    | 246 ++++++++++++++++++++++++++++++++++++------
> >>>  2 files changed, 215 insertions(+), 33 deletions(-)
> >>>
> >>> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> >>> index b73f31baca52..8b434745500a 100644
> >>> --- a/include/kvm/arm_pmu.h
> >>> +++ b/include/kvm/arm_pmu.h
> >>> @@ -22,6 +22,7 @@
> >>>  #include <asm/perf_event.h>
> >>>
> >>>  #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
> >>> +#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
> >>>
> >>>  #ifdef CONFIG_KVM_ARM_PMU
> >>>
> >>> @@ -34,6 +35,7 @@ struct kvm_pmc {
> >>>  struct kvm_pmu {
> >>>  	int irq_num;
> >>>  	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
> >>> +	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
> >>>  	bool ready;
> >>>  	bool created;
> >>>  	bool irq_level;
> >>> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> >>> index ae1e886d4a1a..4b0981c402c6 100644
> >>> --- a/virt/kvm/arm/pmu.c
> >>> +++ b/virt/kvm/arm/pmu.c
> >>> @@ -25,28 +25,128 @@
> >>>  #include <kvm/arm_vgic.h>
> >>>
> >>>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
> >>> +
> >>> +#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
> >>> +
> >>> +static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
> >>> +{
> >>> +	struct kvm_pmu *pmu;
> >>> +	struct kvm_vcpu_arch *vcpu_arch;
> >>> +
> >>> +	pmc -= pmc->idx;
> >>> +	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
> >>> +	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
> >>> +	return container_of(vcpu_arch, struct kvm_vcpu, arch);
> >>> +}
> >>> +
> >>>  /**
> >>> - * kvm_pmu_get_counter_value - get PMU counter value
> >>> + * kvm_pmu_pmc_is_chained - determine if the pmc is chained
> >>> + * @pmc: The PMU counter pointer
> >>> + */
> >>> +static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
> >>> +{
> >>> +	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
> >>> +
> >>> +	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
> >>> +}
> >>> +
> >>> +/**
> >>> + * kvm_pmu_pmc_is_high_counter - determine if select_idx is a high/low counter
> >>> + * @select_idx: The counter index
> >>> + */
> >>> +static bool kvm_pmu_pmc_is_high_counter(u64 select_idx)
> >>> +{
> >>> +	return select_idx & 0x1;
> >>> +}
> >>> +
> >>> +/**
> >>> + * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
> >>> + * @pmc: The PMU counter pointer
> >>> + *
> >>> + * When a pair of PMCs are chained together we use the low counter (canonical)
> >>> + * to hold the underlying perf event.
> >>> + */
> >>> +static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
> >>> +{
> >>> +	if (kvm_pmu_pmc_is_chained(pmc) &&
> >>> +	    kvm_pmu_pmc_is_high_counter(pmc->idx))
> >>> +		return pmc - 1;
> >>> +
> >>> +	return pmc;
> >>> +}
> >>> +
> >>> +/**
> >>> + * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
> >>>  * @vcpu: The vcpu pointer
> >>>  * @select_idx: The counter index
> >>>  */
> >>> -u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
> >>> +static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
> >>>  {
> >>> -	u64 counter, reg, enabled, running;
> >>> -	struct kvm_pmu *pmu = &vcpu->arch.pmu;
> >>> -	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> >>> +	u64 eventsel, reg;
> >>>
> >>> -	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> >>> -	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
> >>> -	counter = __vcpu_sys_reg(vcpu, reg);
> >>> +	select_idx |= 0x1;
> >>> +
> >>> +	if (select_idx == ARMV8_PMU_CYCLE_IDX)
> >>> +		return false;
> >>> +
> >>> +	reg = PMEVTYPER0_EL0 + select_idx;
> >>> +	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
> >>> +
> >>> +	return armv8pmu_evtype_is_chain(eventsel);
> >>> +}
> >>> +
> >>> +/**
> >>> + * kvm_pmu_get_pair_counter_value - get PMU counter value
> >>> + * @vcpu: The vcpu pointer
> >>> + * @pmc: The PMU counter pointer
> >>> + */
> >>> +static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
> >>> +					  struct kvm_pmc *pmc)
> >>> +{
> >>> +	u64 counter, counter_high, reg, enabled, running;
> >>> +
> >>> +	if (kvm_pmu_pmc_is_chained(pmc)) {
> >>> +		pmc = kvm_pmu_get_canonical_pmc(pmc);
> >>> +		reg = PMEVCNTR0_EL0 + pmc->idx;
> >>> +
> >>> +		counter = __vcpu_sys_reg(vcpu, reg);
> >>> +		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
> >>> +
> >>> +		counter = lower_32_bits(counter) | (counter_high << 32);
> >>> +	} else {
> >>> +		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
> >>> +		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
> >>> +		counter = __vcpu_sys_reg(vcpu, reg);
> >>> +	}
> >>>
> >>> -	/* The real counter value is equal to the value of counter register plus
> >>> +	/*
> >>> +	 * The real counter value is equal to the value of counter register plus
> >>>  	 * the value perf event counts.
> >>>  	 */
> >>>  	if (pmc->perf_event)
> >>>  		counter += perf_event_read_value(pmc->perf_event, &enabled,
> >>>  						 &running);
> >>>
> >>> +	return counter;
> >>> +}
> >>> +
> >>> +/**
> >>> + * kvm_pmu_get_counter_value - get PMU counter value
> >>> + * @vcpu: The vcpu pointer
> >>> + * @select_idx: The counter index
> >>> + */
> >>> +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
> >>> +{
> >>> +	u64 counter;
> >>> +	struct kvm_pmu *pmu = &vcpu->arch.pmu;
> >>> +	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> >>> +
> >>> +	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
> >>> +
> >>> +	if (kvm_pmu_pmc_is_chained(pmc) &&
> >>> +	    kvm_pmu_pmc_is_high_counter(select_idx))
> >>> +		counter >>= 32;
> >>> +
> >>>  	return counter & pmc->bitmask;
> >>>  }
> >>>
> >>> @@ -74,6 +174,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
> >>>   */
> >>>  static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
> >>>  {
> >>> +	pmc = kvm_pmu_get_canonical_pmc(pmc);
> >>>  	if (pmc->perf_event) {
> >>>  		perf_event_disable(pmc->perf_event);
> >>>  		perf_event_release_kernel(pmc->perf_event);
> >>> @@ -91,13 +192,24 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
> >>>  {
> >>>  	u64 counter, reg;
> >>>
> >>> -	if (pmc->perf_event) {
> >>> +	pmc = kvm_pmu_get_canonical_pmc(pmc);
> >>> +	if (!pmc->perf_event)
> >>> +		return;
> >>> +
> >>> +	if (kvm_pmu_pmc_is_chained(pmc)) {
> >>> +		counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
> >>> +
> >>> +		reg = PMEVCNTR0_EL0 + pmc->idx;
> >>> +		__vcpu_sys_reg(vcpu, reg) = counter & pmc->bitmask;
> >>> +		__vcpu_sys_reg(vcpu, reg + 1) = (counter >> 32) & pmc->bitmask;
> >>
> >> There is something odd here: You use the same mask for both halves of
> >> the counter. The second one doesn't make much sense, and the first one
> >> makes me wonder... Why isn't bitmask a 64bit quantity in this case?
> >>
> >
> > Yes, it's incorrect - the second bitmask should have been pmc+1's bitmask.
> > (In the previous revision of this series the sysreg values were populated
> > by two calls to kvm_pmu_get_counter_value with pmc and pmc+1 - I introduced
> > this error when using kvm_pmu_get_pair_counter_value instead.)
> >
> > My rationale has been that the __vcpu_sys_reg values should represent the
> > underlying hardware registers. This means a 64 bit register with the upper
> > 32 bits RES0 for the PMEVCNTR<n> registers (chained or otherwise) and a
> > full 64 bit register for PMCCNTR. We currently use the bitmask to mask off
> > the RES0 bits in kvm_pmu_get_counter_value when requested by
> > access_pmu_evcntr (to match the counter width). (And thus I've treated
> > bitmask as the width of the counter *within* each register.)
>
> Well, the truncation is a property of the counter registers, and that's
> what we should honor. The bitmask is a property associated with the perf
> event, allowing us to only consider the useful bits.
>
> > It may be possible, for chained counters, to use only the register value
> > and bitmask in the canonical pmc (just as we do now for the perf_event).
> > Thus for chained counters the bitmask is stored in the low counter and is
> > always 64 bits, and the 64 bit counter value is also only stored in the
> > low counter's vcpu_sys_reg register.
> >
> > This means we could calculate the sample_period with the canonical bitmask
> > (instead of the hunk you also commented on). However, it means that in
> > kvm_pmu_get_counter_value we'd have to mask out the RES0 bits for indexes
> > that are not the cycle counter. We
> > would also have to write the value of the high counter upon demotion from
> > chained to unchained in kvm_pmu_update_pmc_chained.
> >
> > Does this seem a better approach to you?
>
> It would be much better. It would certainly make it clear that there is
> a difference between the perf_event and the emulated counter.

It looks like we don't really need the bitmask at all, seeing as the
information it holds can be deduced from pmc->idx and the ARM ARM. I'll
respin with the latest feedback and without the bitmask.

Thanks for the feedback.

Thanks,

Andrew Murray

>
> Thanks,
>
> M.
> --
> Jazz is not dead. It just smells funny...
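As a concrete illustration of the write-back discussed above, here is a
minimal, compilable sketch of splitting a chained pair's 64 bit value back
into its two 32 bit sysregs, with each half truncated to the width of its
own counter rather than both halves reusing the low counter's mask. The
names pmevcntr and chained_pair_writeback are invented for this example
and are not from the patch:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two PMEVCNTR<n> sysregs of a chained pair. */
static uint64_t pmevcntr[2];

/*
 * Split a 64 bit pair value across the two sysregs: the low sysreg
 * takes bits [31:0], the high sysreg bits [63:32], and each half is
 * masked to the 32 bit width of its own counter.
 */
static void chained_pair_writeback(uint64_t counter)
{
	pmevcntr[0] = counter & 0xffffffffULL;
	pmevcntr[1] = (counter >> 32) & 0xffffffffULL;
}

int main(void)
{
	chained_pair_writeback(0x0000000300000004ULL);
	printf("low: 0x%llx, high: 0x%llx\n",
	       (unsigned long long)pmevcntr[0],
	       (unsigned long long)pmevcntr[1]);
	return 0;
}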
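And in the same spirit, a hypothetical sketch of what dropping the bitmask
amounts to: the counter width is derived from the counter index and chain
state alone, since the ARM ARM fully determines it. kvm_pmu_counter_mask
is an invented name, not something from this series:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_MAX_COUNTERS	32
#define ARMV8_PMU_CYCLE_IDX	(ARMV8_PMU_MAX_COUNTERS - 1)

/*
 * The width of an emulated counter follows from its index and chain
 * state: the cycle counter is 64 bits wide, the canonical low counter
 * of a chained pair accumulates a 64 bit value, and any other event
 * counter is 32 bits wide.
 */
static uint64_t kvm_pmu_counter_mask(uint64_t select_idx, bool chained)
{
	if (select_idx == ARMV8_PMU_CYCLE_IDX || chained)
		return ~0ULL;

	return 0xffffffffULL;
}

int main(void)
{
	uint64_t accumulated = 0x123456789abULL;

	/* The same accumulated value, seen unchained vs chained. */
	printf("unchained: 0x%llx\n", (unsigned long long)
	       (accumulated & kvm_pmu_counter_mask(0, false)));
	printf("chained:   0x%llx\n", (unsigned long long)
	       (accumulated & kvm_pmu_counter_mask(0, true)));
	return 0;
}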