From: Like Xu <likexu@xxxxxxxxxxx>

Since reprogram_gp_counter() always assigns the requested eventsel to
pmc->eventsel, this assignment can be moved forward into the callers,
which reduces the function's parameters to a single "struct kvm_pmc *pmc".
No functional change intended.

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
 arch/x86/kvm/pmu.c           | 7 +++----
 arch/x86/kvm/pmu.h           | 2 +-
 arch/x86/kvm/svm/pmu.c       | 3 ++-
 arch/x86/kvm/vmx/pmu_intel.c | 3 ++-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 0ce33a2798cd..7b8a5f973a63 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -215,16 +215,15 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 	return allow_event;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+void reprogram_gp_counter(struct kvm_pmc *pmc)
 {
 	u64 config;
 	u32 type = PERF_TYPE_RAW;
+	u64 eventsel = pmc->eventsel;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
 
-	pmc->eventsel = eventsel;
-
 	pmc_pause_counter(pmc);
 
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
@@ -291,7 +290,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 void reprogram_counter(struct kvm_pmc *pmc)
 {
 	if (pmc_is_gp(pmc))
-		reprogram_gp_counter(pmc, pmc->eventsel);
+		reprogram_gp_counter(pmc);
 	else {
 		int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
 		u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index b529c54dc309..4db50c290c62 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -140,7 +140,7 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
 	return sample_period;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_gp_counter(struct kvm_pmc *pmc);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmc *pmc);
 
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index d4de52409335..7ff9ccaca0a4 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -265,7 +265,8 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data == pmc->eventsel)
 			return 0;
 		if (!(data & pmu->reserved_bits)) {
-			reprogram_gp_counter(pmc, data);
+			pmc->eventsel = data;
+			reprogram_gp_counter(pmc);
 			return 0;
 		}
 	}
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 20f2b5f5102b..2eefde7e4b1a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -448,7 +448,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data == pmc->eventsel)
 			return 0;
 		if (!(data & pmu->reserved_bits)) {
-			reprogram_gp_counter(pmc, data);
+			pmc->eventsel = data;
+			reprogram_gp_counter(pmc);
 			return 0;
 		}
 	} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
-- 
2.35.1
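
[Editor's note, not part of the patch: the sketch below is a minimal,
standalone illustration of the caller-side pattern this change
establishes. It uses hypothetical mock types (mock_pmc,
mock_reprogram_gp_counter), not the real KVM structures; the
authoritative change is the diff above.]

/*
 * Illustrative sketch only: the value is stored into the structure
 * first, and the reprogram helper reads it back from the structure
 * instead of taking it as a separate parameter.
 */
#include <stdint.h>
#include <stdio.h>

struct mock_pmc {
	uint64_t eventsel;
};

/* After the change: no eventsel parameter, read it from the pmc. */
static void mock_reprogram_gp_counter(struct mock_pmc *pmc)
{
	uint64_t eventsel = pmc->eventsel;

	printf("reprogramming with eventsel=0x%llx\n",
	       (unsigned long long)eventsel);
}

int main(void)
{
	struct mock_pmc pmc = { 0 };
	uint64_t data = 0x4100c0;	/* arbitrary example encoding */

	/* Caller-side pattern from the set_msr paths in the diff. */
	pmc.eventsel = data;
	mock_reprogram_gp_counter(&pmc);

	return 0;
}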