On 8/1/2024 12:58 PM, Mingwei Zhang wrote:
> Reject PMU MSR interception explicitly in
> vmx_get_passthrough_msr_slot() since interception of PMU MSRs is
> handled specially in intel_passthrough_pmu_msrs().
>
> Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
> Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
> Tested-by: Yongwei Ma <yongwei.ma@xxxxxxxxx>
> ---
>  arch/x86/kvm/vmx/vmx.c | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 34a420fa98c5..41102658ed21 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -166,7 +166,7 @@ module_param(enable_passthrough_pmu, bool, 0444);
>  
>  /*
>   * List of MSRs that can be directly passed to the guest.
> - * In addition to these x2apic, PT and LBR MSRs are handled specially.
> + * In addition to these x2apic, PMU, PT and LBR MSRs are handled specially.
>   */
>  static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
>  	MSR_IA32_SPEC_CTRL,
> @@ -695,6 +695,13 @@ static int vmx_get_passthrough_msr_slot(u32 msr)
>  	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
>  	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
>  		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
> +	case MSR_IA32_PMC0 ... MSR_IA32_PMC0 + 7:
> +	case MSR_IA32_PERFCTR0 ... MSR_IA32_PERFCTR0 + 7:
> +	case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + 2:

We'd better use the macros KVM_MAX_NR_GP_COUNTERS/KVM_MAX_NR_FIXED_COUNTERS
to replace these magic numbers.

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6d9ccac839b4..68d9c5e7f91e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -696,9 +696,9 @@ static int vmx_get_passthrough_msr_slot(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-	case MSR_IA32_PMC0 ... MSR_IA32_PMC0 + 7:
-	case MSR_IA32_PERFCTR0 ... MSR_IA32_PERFCTR0 + 7:
-	case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + 2:
+	case MSR_IA32_PMC0 ... MSR_IA32_PMC0 + KVM_MAX_NR_GP_COUNTERS - 1:
+	case MSR_IA32_PERFCTR0 ... MSR_IA32_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
+	case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:

> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		/* PMU MSRs. These are handled in intel_passthrough_pmu_msrs() */
>  		return -ENOENT;
>  	}
>
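
As an aside, the "- 1" is needed because NR counters starting at a base MSR
occupy indices base .. base + NR - 1. A minimal user-space sketch of the
case-range pattern (illustrative values only, not the kernel definitions;
builds with gcc, which supports case ranges):

#include <stdio.h>

/* Illustrative values; the real ones come from msr-index.h and kvm_host.h. */
#define MSR_IA32_PMC0          0x4c1
#define KVM_MAX_NR_GP_COUNTERS 8

/* Returns 1 if msr falls in the GP counter range PMC0 .. PMC0 + NR - 1. */
static int is_gp_counter_msr(unsigned int msr)
{
	switch (msr) {
	case MSR_IA32_PMC0 ... MSR_IA32_PMC0 + KVM_MAX_NR_GP_COUNTERS - 1:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", is_gp_counter_msr(MSR_IA32_PMC0 + 7)); /* 1: last GP counter */
	printf("%d\n", is_gp_counter_msr(MSR_IA32_PMC0 + 8)); /* 0: one past the end */
	return 0;
}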