[PATCH 2/2] KVM: vmx: Reflect misc_enables in real CPU

The IA32_MISC_ENABLE MSR has two bits whose settings affect results that
are observable by the guest: fast-string enable and FOPCODE compatibility
mode.  Guests may wish to change the default settings of these bits.
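
For reference, both bits already have mask definitions in the kernel's
msr-index.h (they are also used in the patch below).  A minimal sketch,
not part of this patch, of how a guest kernel could toggle them, assuming
the hypervisor exposes the MSR as this series intends:

#include <linux/types.h>
#include <asm/msr.h>

/* Illustrative only: flip the two IA32_MISC_ENABLE bits from a guest. */
static void guest_toggle_misc_enable(bool fast_string, bool fopcode_compat)
{
	u64 misc;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc);

	if (fast_string)
		misc |= MSR_IA32_MISC_ENABLE_FAST_STRING;
	else
		misc &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;

	if (fopcode_compat)
		misc |= MSR_IA32_MISC_ENABLE_X87_COMPAT;
	else
		misc &= ~MSR_IA32_MISC_ENABLE_X87_COMPAT;

	wrmsrl(MSR_IA32_MISC_ENABLE, misc);
}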

Linux usually enables fast-string operations by default.  However, when
fast-string is enabled, data breakpoints are only recognized on boundaries
between data groups.  On some older CPUs, enabling fast-string also caused
a single-step trap not to be taken after each iteration.

FOPCODE compatibility mode can be used to analyze program performance:
when it is enabled, the opcode of the last executed x87 instruction is
recorded and can later be retrieved with FSAVE/FSTENV/FXSAVE.
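
As an illustration (a user-space sketch, not part of this patch, and
assuming the OS has FOPCODE compatibility mode enabled), the recorded
opcode can be read back from bytes 6-7 of an FXSAVE image:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* FXSAVE requires a 16-byte aligned, 512-byte save area. */
	static uint8_t fxarea[512] __attribute__((aligned(16)));
	double x = 1.0, y = 3.0, z;

	/* Execute a non-control x87 instruction... */
	asm volatile("fldl %1; fdivl %2; fstpl %0"
		     : "=m" (z) : "m" (x), "m" (y));
	/* ...then snapshot the FPU state; FOP is stored at offset 6. */
	asm volatile("fxsave (%0)" : : "r" (fxarea) : "memory");

	printf("last x87 opcode (11-bit FOP): 0x%03x\n",
	       *(uint16_t *)(fxarea + 6) & 0x7ff);
	return 0;
}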

This patch loads the guest values of these bits into IA32_MISC_ENABLE on
entry to the guest and restores the host values on exit, provided the
bits are supported.  To avoid possible issues, fast-string can only be
enabled by the guest if the host has enabled it as well.  The physical
CPU model is checked to ensure that no shared bits are reconfigured in
the process.
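
On the host side nothing changes for userspace: a VMM still hands the
guest's IA32_MISC_ENABLE value to KVM through KVM_SET_MSRS, which now
reaches the new ->set_misc_enable() hook via kvm_set_msr_common().  A
rough sketch (the helper name and error handling are illustrative only):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

static int set_guest_misc_enable(int vcpu_fd, uint64_t value)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = 0x000001a0;	/* MSR_IA32_MISC_ENABLE */
	msrs.entry.data = value;

	/* KVM_SET_MSRS returns the number of MSRs actually set. */
	return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}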

Signed-off-by: Nadav Amit <namit@xxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm.c              |  7 ++++++
 arch/x86/kvm/vmx.c              | 56 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c              |  2 +-
 4 files changed, 65 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4bda61b..879b930 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -699,6 +699,7 @@ struct kvm_x86_ops {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+	void (*set_misc_enable)(struct kvm_vcpu *vcpu, u64 data);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1f49c86..378e50e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -480,6 +480,11 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
+static void svm_set_misc_enable(struct kvm_vcpu *vcpu, u64 data)
+{
+	vcpu->arch.ia32_misc_enable_msr = data;
+}
+
 static int is_external_interrupt(u32 info)
 {
 	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -1152,6 +1157,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
 
 	svm_set_efer(&svm->vcpu, 0);
+	svm_set_misc_enable(&svm->vcpu, 0);
 	save->dr6 = 0xffff0ff0;
 	kvm_set_rflags(&svm->vcpu, 2);
 	save->rip = 0x0000fff0;
@@ -4338,6 +4344,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_cr3 = svm_set_cr3,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
+	.set_misc_enable = svm_set_misc_enable,
 	.get_idt = svm_get_idt,
 	.set_idt = svm_set_idt,
 	.get_gdt = svm_get_gdt,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 45bab55..2d2efd0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -809,6 +809,8 @@ static const struct kvm_vmx_segment_field {
 };
 
 static u64 host_efer;
+static u64 host_misc_enable;
+static u64 guest_misc_enable_mask;
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
@@ -1609,6 +1611,33 @@ static void reload_tss(void)
 	load_TR_desc();
 }
 
+static void __init update_guest_misc_enable_mask(void)
+{
+	/* Calculate which of the IA32_MISC_ENABLE bits should be reflected
+	 * in hardware */
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u64 data;
+
+	guest_misc_enable_mask = 0;
+
+	/* Core/Atom architectures share fast-string and x87 compat */
+	if (c->x86 != 6 || c->x86_model < 0xd)
+		return;
+
+	if (rdmsrl_safe(MSR_IA32_MISC_ENABLE, &data) < 0)
+		return;
+	if (boot_cpu_has(X86_FEATURE_REP_GOOD))
+		guest_misc_enable_mask |= MSR_IA32_MISC_ENABLE_FAST_STRING;
+
+	preempt_disable();
+	if (wrmsrl_safe(MSR_IA32_MISC_ENABLE,
+			data | MSR_IA32_MISC_ENABLE_X87_COMPAT) >= 0) {
+		guest_misc_enable_mask |= MSR_IA32_MISC_ENABLE_X87_COMPAT;
+		wrmsrl(MSR_IA32_MISC_ENABLE, data);
+	}
+	preempt_enable();
+}
+
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
 	u64 guest_efer;
@@ -3126,6 +3155,8 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_apicv())
 		enable_apicv = 0;
 
+	update_guest_misc_enable_mask();
+
 	if (enable_apicv)
 		kvm_x86_ops->update_cr8_intercept = NULL;
 	else {
@@ -3315,6 +3346,28 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	setup_msrs(vmx);
 }
 
+static void vmx_set_misc_enable(struct kvm_vcpu *vcpu, u64 data)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	vcpu->arch.ia32_misc_enable_msr = data;
+
+	if (guest_misc_enable_mask == 0)
+		return;
+	clear_atomic_switch_msr(vmx, MSR_IA32_MISC_ENABLE);
+	if (((data ^ host_misc_enable) & guest_misc_enable_mask) == 0)
+		return;
+
+	/*
+	 * If the guest value differs, switch it atomically on entry/exit,
+	 * since it can affect host performance.
+	 */
+	data &= guest_misc_enable_mask;
+	data |= (host_misc_enable & ~guest_misc_enable_mask);
+	add_atomic_switch_msr(vmx, MSR_IA32_MISC_ENABLE, data,
+				host_misc_enable);
+}
+
 #ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -4555,6 +4608,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
+	vmx_set_misc_enable(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
 	update_exception_bitmap(&vmx->vcpu);
 
@@ -8883,6 +8937,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
 	.set_efer = vmx_set_efer,
+	.set_misc_enable = vmx_set_misc_enable,
 	.get_idt = vmx_get_idt,
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
@@ -8961,6 +9016,7 @@ static int __init vmx_init(void)
 	int r, i, msr;
 
 	rdmsrl_safe(MSR_EFER, &host_efer);
+	rdmsrl_safe(MSR_IA32_MISC_ENABLE, &host_misc_enable);
 
 	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
 		kvm_define_shared_msr(i, vmx_msr_index[i]);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5f5edb6..72d449a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2096,7 +2096,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	case MSR_IA32_MISC_ENABLE:
-		vcpu->arch.ia32_misc_enable_msr = data;
+		kvm_x86_ops->set_misc_enable(vcpu, data);
 		break;
 	case MSR_KVM_WALL_CLOCK_NEW:
 	case MSR_KVM_WALL_CLOCK:
-- 
1.9.1
