On 21/10/19 12:55, Like Xu wrote:
> Replace the explicit declaration of "u64 reprogram_pmi" with the generic
> macro DECLARE_BITMAP for all possible appropriate number of bits.
>
> Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> Signed-off-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/pmu.c              | 15 +++++----------
>  2 files changed, 6 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 50eb430b0ad8..236a876a5a2e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -469,7 +469,7 @@ struct kvm_pmu {
>  	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
>  	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
>  	struct irq_work irq_work;
> -	u64 reprogram_pmi;
> +	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
>  };
>
>  struct kvm_pmu_ops;
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 46875bbd0419..75e8f9fae031 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -62,8 +62,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
>  	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
>  	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
>
> -	if (!test_and_set_bit(pmc->idx,
> -		(unsigned long *)&pmu->reprogram_pmi)) {
> +	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
>  		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
>  		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
>  	}
> @@ -76,8 +75,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
>  	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
>  	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
>
> -	if (!test_and_set_bit(pmc->idx,
> -		(unsigned long *)&pmu->reprogram_pmi)) {
> +	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
>  		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
>  		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
>
> @@ -137,7 +135,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
>  	}
>
>  	pmc->perf_event = event;
> -	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
> +	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
>  }
>
>  void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
> @@ -253,16 +251,13 @@ EXPORT_SYMBOL_GPL(reprogram_counter);
>  void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> -	u64 bitmask;
>  	int bit;
>
> -	bitmask = pmu->reprogram_pmi;
> -
> -	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
> +	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
>  		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
>
>  		if (unlikely(!pmc || !pmc->perf_event)) {
> -			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
> +			clear_bit(bit, pmu->reprogram_pmi);
>  			continue;
>  		}
>

Queued, thanks.

Paolo