When CPUID.01H:EDX.DS[21] is set, the IA32_DS_AREA MSR exists and points
to the linear address of the first byte of the DS buffer management area,
which is used to manage the PEBS records.

When guest PEBS is enabled and the guest value differs from the host's,
KVM adds the IA32_DS_AREA MSR to the MSR-switch list so that the guest's
DS value is loaded into the real hardware before VM-entry; the entry is
removed again when guest PEBS is disabled. A WRMSR to the IA32_DS_AREA
MSR triggers a #GP(0) if the source register contains a non-canonical
address. Switching the IA32_DS_AREA MSR also sets up a quiescent period
so that pending host PEBS records (if any) are written to the host DS
area rather than the guest DS area.

When guest PEBS is enabled, the MSR_IA32_DS_AREA MSR is added to the
perf_guest_switch_msr array and switched during the VMX transitions,
just like the CORE_PERF_GLOBAL_CTRL MSR.

Originally-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Co-developed-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
---
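Note (illustration only, not part of the patch): a minimal sketch of the
guest-side view this enables. The wrmsr64() helper and the freestanding
style below are assumptions for the example, not code from this series;
the MSR index 0x600 is IA32_DS_AREA per the SDM. The guest simply points
IA32_DS_AREA at its DS buffer management area; KVM traps the WRMSR,
rejects non-canonical addresses with #GP(0), caches the value in
pmu->ds_area and loads it into the hardware MSR across VM-entry via the
switch list added below.

#include <stdint.h>

#define MSR_IA32_DS_AREA	0x00000600

/* Hypothetical helper: WRMSR with ECX = msr, EDX:EAX = 64-bit value. */
static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (msr), "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32))
		     : "memory");
}

/* Point IA32_DS_AREA at the guest's DS buffer management area. */
static void guest_set_ds_area(void *ds_mgmt_area)
{
	/* A non-canonical address here would be rejected with #GP(0). */
	wrmsr64(MSR_IA32_DS_AREA, (uint64_t)ds_mgmt_area);
}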
 arch/x86/events/intel/core.c    | 13 +++++++++++++
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/vmx/pmu_intel.c    | 11 +++++++++++
 arch/x86/kvm/vmx/vmx.c          |  6 ++++++
 4 files changed, 31 insertions(+)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d824c7156d34..71b45376ee1f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3394,6 +3394,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
 
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
 	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
@@ -3440,6 +3441,18 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 		*nr = 2;
 	}
 
+	if (arr[1].guest) {
+		arr[2].msr = MSR_IA32_DS_AREA;
+		arr[2].host = (unsigned long)ds;
+		/* KVM will update MSR_IA32_DS_AREA with the trapped guest value. */
+		arr[2].guest = 0ull;
+		*nr = 3;
+	} else if (*nr == 2) {
+		arr[2].msr = MSR_IA32_DS_AREA;
+		arr[2].host = arr[2].guest = 0;
+		*nr = 3;
+	}
+
 	return arr;
 }
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6d7e895ae535..eb24c4796f8b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -449,6 +449,7 @@ struct kvm_pmu {
 	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
 	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
 
+	u64 ds_area;
 	u64 pebs_enable;
 	u64 pebs_enable_mask;
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 2f10587bda19..ff5fc405703f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -183,6 +183,9 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_IA32_PEBS_ENABLE:
 		ret = vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT;
 		break;
+	case MSR_IA32_DS_AREA:
+		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
+		break;
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -227,6 +230,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_PEBS_ENABLE:
 		msr_info->data = pmu->pebs_enable;
 		return 0;
+	case MSR_IA32_DS_AREA:
+		msr_info->data = pmu->ds_area;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -294,6 +300,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 0;
 		}
 		break;
+	case MSR_IA32_DS_AREA:
+		if (is_noncanonical_address(data, vcpu))
+			return 1;
+		pmu->ds_area = data;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 83a16ae04b4e..94dbd79a8582 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -975,6 +975,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			return;
 		}
 		break;
+	case MSR_IA32_DS_AREA:
 	case MSR_IA32_PEBS_ENABLE:
 		/* PEBS needs a quiescent period after being disabled (to write
 		 * a record). Disabling PEBS through VMX MSR swapping doesn't
@@ -6536,12 +6537,17 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 {
 	int i, nr_msrs;
 	struct perf_guest_switch_msr *msrs;
+	struct kvm_vcpu *vcpu = &vmx->vcpu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	msrs = perf_guest_get_msrs(&nr_msrs);
 
 	if (!msrs)
 		return;
 
+	if (nr_msrs > 2 && msrs[1].guest)
+		msrs[2].guest = pmu->ds_area;
+
 	for (i = 0; i < nr_msrs; i++)
 		if (msrs[i].host == msrs[i].guest)
 			clear_atomic_switch_msr(vmx, msrs[i].msr);
-- 
2.21.3