On Tue, Feb 14, 2023, Like Xu wrote:
> @@ -574,11 +569,61 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
>  
>  int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +	u32 msr = msr_info->index;
> +
> +	switch (msr) {
> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +		msr_info->data = pmu->global_status;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +		msr_info->data = pmu->global_ctrl;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		msr_info->data = 0;
> +		return 0;
> +	default:
> +		break;
> +	}
> +
>  	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
>  }
>  
>  int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +	u32 msr = msr_info->index;
> +	u64 data = msr_info->data;
> +	u64 diff;
> +
> +	switch (msr) {
> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +		if (!msr_info->host_initiated || (data & pmu->global_ovf_ctrl_mask))
> +			return 1; /* RO MSR */
> +
> +		pmu->global_status = data;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +		if (!kvm_valid_perf_global_ctrl(pmu, data))
> +			return 1;
> +
> +		if (pmu->global_ctrl != data) {
> +			diff = pmu->global_ctrl ^ data;
> +			pmu->global_ctrl = data;
> +			reprogram_counters(pmu, diff);
> +		}
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		if (data & pmu->global_ovf_ctrl_mask)
> +			return 1;
> +
> +		if (!msr_info->host_initiated)
> +			pmu->global_status &= ~data;
> +		return 0;
> +	default:
> +		break;
> +	}
> +
>  	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
>  	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
>  }

Please tweak these to follow the patterns for other MSR helpers (see below).
I don't actually mind the style, but people get used to the pattern and make
mistakes when there are unexpected deviations.

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}

	return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */

		pmu->global_status = data;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;

		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (data & pmu->global_ovf_ctrl_mask)
			return 1;

		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}

	return 0;
}
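
For context on the reprogram_counters(pmu, diff) call in both versions: the XOR
is computed before pmu->global_ctrl is overwritten so that only counters whose
global-enable bit actually flipped get reprogrammed. A rough sketch of what the
helper does, based on the pmu_intel.c implementation around this point in the
tree (the exact body and helper names may differ):

static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;
	struct kvm_pmc *pmc;

	/* Sketch: walk only the GLOBAL_CTRL bits that changed. */
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);
		if (pmc)
			kvm_pmu_request_counter_reprogram(pmc);
	}
}

E.g. if global_ctrl goes from 0x3 to 0x5, diff is 0x6, so only counters 1 and 2
are marked for reprogramming; counter 0, whose enable bit did not change, is
left alone.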