[PATCH v3 05/12] KVM: x86: Add support for exiting to userspace on rdmsr or wrmsr

Add support for exiting to userspace on a rdmsr or wrmsr instruction if
the MSR being read from or written to is in the user_exit_msrs list.
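
On such an exit, KVM_RUN returns with exit_reason KVM_EXIT_X86_RDMSR or
KVM_EXIT_X86_WRMSR and userspace completes the access through kvm_run->msr
before re-entering the vCPU.  A minimal sketch of that handler, assuming
the exit-reason and kvm_run->msr field names from Alexander Graf's patch
at the head of this series (the read-as-zero/accept-writes policy is
illustrative only):

#include <linux/kvm.h>

/*
 * Illustrative sketch only.  Setting msr.error nonzero makes
 * complete_emulated_msr() inject #GP on the next KVM_RUN.
 */
static void handle_msr_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		/* run->msr.index holds the rdmsr target; emulate it as
		 * read-as-zero for this sketch. */
		run->msr.data = 0;
		run->msr.error = 0;
		break;
	case KVM_EXIT_X86_WRMSR:
		/* run->msr.index/data hold the guest's wrmsr; accept it. */
		run->msr.error = 0;
		break;
	}
	/* The next KVM_RUN resumes the guest via complete_emulated_msr(). */
}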

Signed-off-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
---

v2 -> v3

  - Refactored this commit on top of Alexander Graf's changes in the first
    commit of this series.  Changes made:
      - Renamed member 'inject_gp' to 'error' to match struct msr in
        kvm_run.
      - Moved the 'vcpu->kvm->arch.user_space_msr_enabled' check out of
        kvm_msr_user_space() so it works with both methods that bounce to
        userspace (MSR list and #GP fallback), and updated the callers
        accordingly.
      - Moved trace_kvm_msr up and combined it with a previous call in
        complete_emulated_msr(), as suggested by Alexander Graf
        <graf@xxxxxxxxxx>.
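
For reviewers who want to exercise the list-based path: these exits are
opt-in, keyed off MSR indices that userspace registers up front with the
KVM_SET_EXIT_MSRS vm ioctl added earlier in this series.  A rough setup
sketch, assuming that ioctl name and the existing struct kvm_msr_list
uapi (the two MSR indices are arbitrary examples):

#include <err.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register two example MSRs whose accesses should bounce to userspace. */
static void set_exit_msrs(int vm_fd)
{
	struct kvm_msr_list *list;

	list = malloc(sizeof(*list) + 2 * sizeof(list->indices[0]));
	if (!list)
		err(1, "malloc");
	list->nmsrs = 2;
	list->indices[0] = 0x174;	/* MSR_IA32_SYSENTER_CS */
	list->indices[1] = 0x176;	/* MSR_IA32_SYSENTER_EIP */

	if (ioctl(vm_fd, KVM_SET_EXIT_MSRS, list) < 0)
		err(1, "KVM_SET_EXIT_MSRS");
	free(list);
}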

---
 arch/x86/kvm/trace.h | 24 ++++++++++++++
 arch/x86/kvm/x86.c   | 75 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 89 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index b66432b015d2..755610befbb5 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -367,6 +367,30 @@ TRACE_EVENT(kvm_msr,
 #define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
 #define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
 
+TRACE_EVENT(kvm_userspace_msr,
+	TP_PROTO(bool is_write, u8 error, u32 index, u64 data),
+	TP_ARGS(is_write, error, index, data),
+
+	TP_STRUCT__entry(
+		__field(bool,	is_write)
+		__field(u8,	error)
+		__field(u32,	index)
+		__field(u64,	data)
+	),
+
+	TP_fast_assign(
+		__entry->is_write	= is_write;
+		__entry->error		= error;
+		__entry->index		= index;
+		__entry->data		= data;
+	),
+
+	TP_printk("userspace %s %x = 0x%llx, %s",
+		  __entry->is_write ? "wrmsr" : "rdmsr",
+		  __entry->index, __entry->data,
+		  __entry->error ? "error" : "no_error")
+);
+
 /*
  * Tracepoint for guest CR access.
  */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e349d51d5d65..b370b3f4b4f3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -109,6 +109,8 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
 static int sync_regs(struct kvm_vcpu *vcpu);
 
+bool kvm_msr_user_exit(struct kvm *kvm, u32 index);
+
 struct kvm_x86_ops kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
@@ -1629,11 +1631,19 @@ EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 static int complete_emulated_msr(struct kvm_vcpu *vcpu, bool is_read)
 {
-	if (vcpu->run->msr.error) {
+	u32 ecx = vcpu->run->msr.index;
+	u64 data = vcpu->run->msr.data;
+	u8 error = vcpu->run->msr.error;
+
+	trace_kvm_userspace_msr(!is_read, error, ecx, data);
+	trace_kvm_msr(!is_read, ecx, data, !!error);
+
+	if (error) {
 		kvm_inject_gp(vcpu, 0);
+		return 1;
 	} else if (is_read) {
-		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
-		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
+		kvm_rax_write(vcpu, (u32)data);
+		kvm_rdx_write(vcpu, data >> 32);
 	}
 
 	return kvm_skip_emulated_instruction(vcpu);
@@ -1653,9 +1663,6 @@ static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
 			      u32 exit_reason, u64 data,
 			      int (*completion)(struct kvm_vcpu *vcpu))
 {
-	if (!vcpu->kvm->arch.user_space_msr_enabled)
-		return 0;
-
 	vcpu->run->exit_reason = exit_reason;
 	vcpu->run->msr.error = 0;
 	vcpu->run->msr.pad[0] = 0;
@@ -1686,10 +1693,17 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 	u64 data;
 	int r;
 
+	if (kvm_msr_user_exit(vcpu->kvm, ecx)) {
+		kvm_get_msr_user_space(vcpu, ecx);
+		/* Bounce to user space */
+		return 0;
+	}
+
 	r = kvm_get_msr(vcpu, ecx, &data);
 
 	/* MSR read failed? See if we should ask user space */
-	if (r && kvm_get_msr_user_space(vcpu, ecx)) {
+	if (r && vcpu->kvm->arch.user_space_msr_enabled) {
+		kvm_get_msr_user_space(vcpu, ecx);
 		/* Bounce to user space */
 		return 0;
 	}
@@ -1715,10 +1729,17 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 	u64 data = kvm_read_edx_eax(vcpu);
 	int r;
 
+	if (kvm_msr_user_exit(vcpu->kvm, ecx)) {
+		kvm_set_msr_user_space(vcpu, ecx, data);
+		/* Bounce to user space */
+		return 0;
+	}
+
 	r = kvm_set_msr(vcpu, ecx, data);
 
 	/* MSR write failed? See if we should ask user space */
-	if (r && kvm_set_msr_user_space(vcpu, ecx, data)) {
+	if (r && vcpu->kvm->arch.user_space_msr_enabled) {
+		kvm_set_msr_user_space(vcpu, ecx, data);
 		/* Bounce to user space */
 		return 0;
 	}
@@ -3606,6 +3627,25 @@ static int kvm_vm_ioctl_set_exit_msrs(struct kvm *kvm,
 	return 0;
 }
 
+bool kvm_msr_user_exit(struct kvm *kvm, u32 index)
+{
+	struct kvm_msr_list *exit_msrs;
+	int i;
+
+	exit_msrs = kvm->arch.user_exit_msrs;
+
+	if (!exit_msrs)
+		return false;
+
+	for (i = 0; i < exit_msrs->nmsrs; ++i) {
+		if (exit_msrs->indices[i] == index)
+			return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_msr_user_exit);
+
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	int r = 0;
@@ -6640,9 +6680,16 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	int r;
 
+	if (kvm_msr_user_exit(vcpu->kvm, msr_index)) {
+		kvm_get_msr_user_space(vcpu, msr_index);
+		/* Bounce to user space */
+		return X86EMUL_IO_NEEDED;
+	}
+
 	r = kvm_get_msr(vcpu, msr_index, pdata);
 
-	if (r && kvm_get_msr_user_space(vcpu, msr_index)) {
+	if (r && vcpu->kvm->arch.user_space_msr_enabled) {
+		kvm_get_msr_user_space(vcpu, msr_index);
 		/* Bounce to user space */
 		return X86EMUL_IO_NEEDED;
 	}
@@ -6656,9 +6703,16 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	int r;
 
+	if (kvm_msr_user_exit(vcpu->kvm, msr_index)) {
+		kvm_set_msr_user_space(vcpu, msr_index, data);
+		/* Bounce to user space */
+		return X86EMUL_IO_NEEDED;
+	}
+
 	r = kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
 
-	if (r && kvm_set_msr_user_space(vcpu, msr_index, data)) {
+	if (r && vcpu->kvm->arch.user_space_msr_enabled) {
+		kvm_set_msr_user_space(vcpu, msr_index, data);
 		/* Bounce to user space */
 		return X86EMUL_IO_NEEDED;
 	}
@@ -11090,3 +11144,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_userspace_msr);
-- 
2.28.0.220.ged08abb693-goog



