[PATCH v1 08/11] KVM: x86/pmu: PEBS MSRs emulation

This patch implements emulation of the PEBS MSRs in KVM, namely
IA32_PEBS_ENABLE and IA32_DS_AREA.
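
For background, and for illustration only (this sequence is not part of
this patch), a guest that uses PEBS via the DS save area programs the
two MSRs roughly as below; "ds_mgmt_area" is a hypothetical pointer to
a DS buffer management area laid out as described in the SDM:

	/* Hypothetical guest-side sequence, for illustration only. */
	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds_mgmt_area);
	wrmsrl(MSR_IA32_PEBS_ENABLE, BIT_ULL(0));	/* PEBS on GP counter 0 */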

The IA32_DS_AREA MSR is added to the VMX MSR-load list when PEBS is
enabled in the KVM guest, so that the guest's DS_AREA value is loaded
into the real hardware before VM-entry, and it is removed from the list
again when PEBS is disabled.
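
Condensed from pebs_enable_changed() below (a sketch of the same code
with explanatory comments added), the effect of a guest write of "data"
to IA32_PEBS_ENABLE is:

	if (data) {
		/*
		 * Pair the guest and host DS_AREA values on the VMX
		 * autoload lists: the guest's pmu->ds_area is loaded on
		 * VM-entry and the saved host value is restored on
		 * VM-exit (entry_only is false, so both lists get an
		 * entry).
		 */
		rdmsrl_safe(MSR_IA32_DS_AREA, &host_ds_area);
		add_atomic_switch_msr(vmx, MSR_IA32_DS_AREA,
				      pmu->ds_area, host_ds_area, false);
	} else {
		/* Guest cleared all PEBS bits: stop switching DS_AREA. */
		clear_atomic_switch_msr(vmx, MSR_IA32_DS_AREA);
	}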

Originally-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
Co-developed-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/vmx/pmu_intel.c    | 45 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c          |  4 ++--
 arch/x86/kvm/vmx/vmx.h          |  4 ++++
 arch/x86/kvm/x86.c              |  7 +++++++
 5 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 033d9f9..33b990b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -479,6 +479,8 @@ struct kvm_pmu {
 	u64 global_ovf_ctrl_mask;
 	u64 reserved_bits;
 	u64 pebs_enable;
+	u64 pebs_enable_mask;
+	u64 ds_area;
 	u8 version;
 	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
@@ -493,6 +495,8 @@ struct kvm_pmu {
 	 */
 	bool need_cleanup;
 
+	bool has_pebs_via_ds;
+
 	/*
 	 * The total number of programmed perf_events and it helps to avoid
 	 * redundant check before cleanup if guest don't use vPMU at all.
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a67bd34..227589a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -67,6 +67,22 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
+static void pebs_enable_changed(struct kvm_pmu *pmu, u64 data)
+{
+	struct vcpu_vmx *vmx = to_vmx(pmu_to_vcpu(pmu));
+	u64 host_ds_area;
+
+	if (data) {
+		rdmsrl_safe(MSR_IA32_DS_AREA, &host_ds_area);
+		add_atomic_switch_msr(vmx, MSR_IA32_DS_AREA,
+				      pmu->ds_area, host_ds_area, false);
+	} else {
+		clear_atomic_switch_msr(vmx, MSR_IA32_DS_AREA);
+	}
+
+	pmu->pebs_enable = data;
+}
+
 static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 				      u8 event_select,
 				      u8 unit_mask)
@@ -163,6 +179,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		ret = pmu->version > 1;
 		break;
+	case MSR_IA32_DS_AREA:
+	case MSR_IA32_PEBS_ENABLE:
+		ret = pmu->has_pebs_via_ds;
+		break;
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -219,6 +239,12 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		*data = pmu->global_ovf_ctrl;
 		return 0;
+	case MSR_IA32_PEBS_ENABLE:
+		*data = pmu->pebs_enable;
+		return 0;
+	case MSR_IA32_DS_AREA:
+		*data = pmu->ds_area;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
 			u64 val = pmc_read_counter(pmc);
@@ -275,6 +301,17 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 0;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		if (pmu->pebs_enable == data)
+			return 0;
+		if (!(data & pmu->pebs_enable_mask)) {
+			pebs_enable_changed(pmu, data);
+			return 0;
+		}
+		break;
+	case MSR_IA32_DS_AREA:
+		pmu->ds_area = data;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
 			if (!msr_info->host_initiated)
@@ -351,6 +388,14 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		pmu->global_ovf_ctrl_mask &=
 				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_DTES64) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_PDCM) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_DS) &&
+	    intel_is_pebs_via_ds_supported()) {
+		pmu->has_pebs_via_ds = true;
+		pmu->pebs_enable_mask = ~pmu->global_ctrl;
+	}
+
 	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
 	if (entry &&
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cef7089..c6d9a87 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -864,7 +864,7 @@ int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
 	return -ENOENT;
 }
 
-static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
 	int i;
 	struct msr_autoload *m = &vmx->msr_autoload;
@@ -916,7 +916,7 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 	vm_exit_controls_setbit(vmx, exit);
 }
 
-static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val, bool entry_only)
 {
 	int i, j = 0;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index e64da06..ea899e7 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -536,4 +536,8 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 
 void dump_vmcs(void);
 
+void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr);
+void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+			   u64 guest_val, u64 host_val, bool entry_only);
+
 #endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5de2006..7a23406 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1193,6 +1193,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
 	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -5263,6 +5264,12 @@ static void kvm_init_msr_list(void)
 			if (!kvm_x86_ops->rdtscp_supported())
 				continue;
 			break;
+		case MSR_IA32_PEBS_ENABLE:
+		case MSR_IA32_DS_AREA:
+			if (!kvm_x86_ops->pmu_ops ||
+			    !kvm_x86_ops->pmu_ops->is_pebs_via_ds_supported())
+				continue;
+			break;
 		case MSR_IA32_RTIT_CTL:
 		case MSR_IA32_RTIT_STATUS:
 			if (!kvm_x86_ops->pt_supported())
-- 
1.8.3.1



