On 12/26/2022 7:17 PM, Like Xu wrote:
> From: Like Xu <likexu@xxxxxxxxxxx>
>
> When the PMU is disabled, don't bother sharing the PMU MSRs with
> userspace through KVM_GET_MSR_INDEX_LIST. Instead, filter them out
> so userspace doesn't have to keep track of them. Note that
> 'enable_pmu' is read-only, so userspace has no control over whether
> the PMU MSRs are included in the list or not.
>
> Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
> Co-developed-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
> Signed-off-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
> Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |  1 +
>  arch/x86/kvm/x86.c              | 22 ++++++++++++++++++++--
>  2 files changed, 21 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f35f1ff4427b..2ed710b393eb 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -514,6 +514,7 @@ struct kvm_pmc {
>  #define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
>  #define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
>  #define KVM_PMC_MAX_FIXED	3
> +#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
>  #define KVM_AMD_PMC_MAX_GENERIC	6
>  struct kvm_pmu {
>  	unsigned nr_arch_gp_counters;
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 5c3ce39cdccb..f570367463c8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -7054,15 +7054,32 @@ static void kvm_init_msr_list(void)
>  			continue;
>  		break;
>  	case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
> -		if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
> +		if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
>  		    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
>  			continue;
>  		break;
>  	case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
> -		if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
> +		if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
>  		    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
>  			continue;
>  		break;
> +	case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
> +		if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_FIXED_CTR0 >=
> +		    min(KVM_PMC_MAX_FIXED, kvm_pmu_cap.num_counters_fixed))
> +			continue;
> +		break;
> +	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
> +	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
> +	case MSR_CORE_PERF_FIXED_CTR_CTRL:
> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +	case MSR_IA32_DS_AREA:
> +	case MSR_IA32_PEBS_ENABLE:
> +	case MSR_PEBS_DATA_CFG:
> +		if (!enable_pmu)
> +			continue;
> +		break;
I would prefer to use a helper to wrap this hunk of PMU MSR checks and move
the helper into the "default" branch of the switch; it makes the code look
nicer:

	default:
		if (!enable_pmu && !kvm_pmu_valid_msrlist(msr))
			continue;
>  	case MSR_IA32_XFD:
>  	case MSR_IA32_XFD_ERR:
>  		if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
> @@ -13468,3 +13485,4 @@ static void __exit kvm_x86_exit(void)
>  	 */
>  }
>  module_exit(kvm_x86_exit);
> +
Extra newline.
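
Side note, not part of this patch: the change is only observable through
KVM_GET_MSR_INDEX_LIST, so with enable_pmu=0 the PMU MSRs above should
simply disappear from the reported list. A minimal userspace snippet to
dump the list and eyeball the difference (illustrative only, error
handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	unsigned int i;

	/* First call returns E2BIG but writes back the total MSR count. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
	list->nmsrs = probe.nmsrs;
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list);

	/* With enable_pmu=0, the PMU MSRs should no longer show up here. */
	for (i = 0; i < list->nmsrs; i++)
		printf("0x%x\n", list->indices[i]);

	free(list);
	return 0;
}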