[PATCH v4 07/10] ARM: KVM: Emulation framework and CP15 emulation

Adds an important new function to the main KVM/ARM code, handle_exit(),
which is called from kvm_arch_vcpu_ioctl_run() on every return from
guest execution. This function examines the Hyp Syndrome Register
(HSR), which contains information telling KVM what caused the exit from
the guest.
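
For reference, the decode that handle_exit() relies on is a simple shift
and mask of the HSR. The sketch below is illustrative only (the helper
name is not part of the patch), but HSR_EC, HSR_EC_SHIFT and the
HSR_EC_* class values are the ones used by this patch:

	/*
	 * Sketch: the exception class (EC) lives in HSR bits [31:26], the
	 * instruction-length (IL) bit at bit 25, and the instruction-specific
	 * syndrome in bits [24:0].
	 */
	static inline unsigned long hsr_exception_class(u32 hsr)
	{
		return (hsr & HSR_EC) >> HSR_EC_SHIFT;	/* e.g. HSR_EC_CP15_32 */
	}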

Some of the exit reasons are trapped CP15 accesses, which are not
allowed from the guest; this commit handles those exits by emulating
the intended operation in software and skipping the trapped guest
instruction.
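
As a concrete example of the flow, consider a guest read of the PRRR,
one of the accesses this patch emulates; the register numbers below are
made up for the example:

	/*
	 * Guest executes:  mrc p15, 0, r0, c10, c2, 0   @ read PRRR
	 * The access traps to Hyp mode, handle_exit() sees HSR_EC_CP15_32 and
	 * calls kvm_handle_cp15_access(), which decodes CRn=10, CRm=2, Op1=0,
	 * Op2=0, Rt1=0 from the HSR and emulates the read:
	 */
	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15.c10_PRRR;	/* emulated read */
	*vcpu_reg(vcpu, 15) += 4;	/* skip the trapped 32-bit instruction */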

Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
---
 arch/arm/include/asm/kvm_emulate.h |    7 +
 arch/arm/kvm/arm.c                 |   77 ++++++++++++++
 arch/arm/kvm/arm_emulate.c         |  195 ++++++++++++++++++++++++++++++++++++
 arch/arm/kvm/trace.h               |   28 +++++
 4 files changed, 307 insertions(+), 0 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 91d461a..af21fd5 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,6 +40,13 @@ static inline unsigned char vcpu_mode(struct kvm_vcpu *vcpu)
 	return modes_table[vcpu->arch.regs.cpsr & 0xf];
 }
 
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
 /*
  * Return the SPSR for the specified mode of the virtual CPU.
  */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 196eace..a28de12 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -35,6 +35,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_emulate.h>
 
 #include "debug.h"
 
@@ -311,6 +312,62 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return 0;
 }
 
+static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      int exception_index)
+{
+	unsigned long hsr_ec;
+
+	if (exception_index == ARM_EXCEPTION_IRQ)
+		return 0;
+
+	if (exception_index != ARM_EXCEPTION_HVC) {
+		kvm_err(-EINVAL, "Unsupported exception type");
+		return -EINVAL;
+	}
+
+	hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+	switch (hsr_ec) {
+	case HSR_EC_WFI:
+		return kvm_handle_wfi(vcpu, run);
+	case HSR_EC_CP15_32:
+	case HSR_EC_CP15_64:
+		return kvm_handle_cp15_access(vcpu, run);
+	case HSR_EC_CP14_MR:
+		return kvm_handle_cp14_access(vcpu, run);
+	case HSR_EC_CP14_LS:
+		return kvm_handle_cp14_load_store(vcpu, run);
+	case HSR_EC_CP14_64:
+		return kvm_handle_cp14_access(vcpu, run);
+	case HSR_EC_CP_0_13:
+		return kvm_handle_cp_0_13_access(vcpu, run);
+	case HSR_EC_CP10_ID:
+		return kvm_handle_cp10_id(vcpu, run);
+	case HSR_EC_SVC_HYP:
+		/* SVC called from Hyp mode should never get here */
+		kvm_msg("SVC called from Hyp mode shouldn't go here");
+		BUG();
+	case HSR_EC_HVC:
+		kvm_msg("hvc: %x (at %08x)", vcpu->arch.hsr & ((1 << 16) - 1),
+					     vcpu->arch.regs.pc);
+		kvm_msg("         HSR: %8x", vcpu->arch.hsr);
+		break;
+	case HSR_EC_IABT:
+	case HSR_EC_DABT:
+		return kvm_handle_guest_abort(vcpu, run);
+	case HSR_EC_IABT_HYP:
+	case HSR_EC_DABT_HYP:
+		/* The hypervisor should never cause aborts */
+		kvm_msg("The hypervisor itself shouldn't cause aborts");
+		BUG();
+	default:
+		kvm_msg("Unknown exception class: %08x (%08x)", hsr_ec,
+				vcpu->arch.hsr);
+		BUG();
+	}
+
+	return 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:	The VCPU pointer
@@ -339,6 +396,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_guest_exit();
 		debug_ws_exit(vcpu->arch.regs.pc);
 		trace_kvm_exit(vcpu->arch.regs.pc);
+
+		ret = handle_exit(vcpu, run, ret);
+		if (ret) {
+			kvm_err(ret, "Error in handle_exit");
+			break;
+		}
+
+		if (run->exit_reason == KVM_EXIT_MMIO)
+			break;
+
+		if (need_resched()) {
+			vcpu_put(vcpu);
+			schedule();
+			vcpu_load(vcpu);
+		}
+
+		if (signal_pending(current) && !(run->exit_reason)) {
+			run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+			break;
+		}
 	}
 
 	return ret;
diff --git a/arch/arm/kvm/arm_emulate.c b/arch/arm/kvm/arm_emulate.c
index 6587dde..37fe029 100644
--- a/arch/arm/kvm/arm_emulate.c
+++ b/arch/arm/kvm/arm_emulate.c
@@ -14,7 +14,14 @@
  *
  */
 
+#include <linux/mm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "debug.h"
+#include "trace.h"
 
 #define USR_REG_OFFSET(_reg) \
 	offsetof(struct kvm_vcpu_arch, regs.usr_regs[_reg])
@@ -119,3 +126,191 @@ u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode)
 
 	return (u32 *)((void *)&vcpu->arch + vcpu_reg_offsets[mode][reg_num]);
 }
+
+/******************************************************************************
+ * Co-processor emulation
+ */
+
+struct coproc_params {
+	unsigned long CRm;
+	unsigned long CRn;
+	unsigned long Op1;
+	unsigned long Op2;
+	unsigned long Rt1;
+	unsigned long Rt2;
+	bool is_64bit;
+	bool is_write;
+};
+
+#define CP15_OP(_vcpu, _params, _cp15_reg) \
+do { \
+	if (_params->is_write) \
+		_vcpu->arch.cp15._cp15_reg = *vcpu_reg(_vcpu, _params->Rt1); \
+	else \
+		*vcpu_reg(_vcpu, _params->Rt1) = _vcpu->arch.cp15._cp15_reg; \
+} while (0)
+
+
+static inline void print_cp_instr(struct coproc_params *p)
+{
+	if (p->is_64bit) {
+		kvm_msg("    %s\tp15, %u, r%u, r%u, c%u",
+				(p->is_write) ? "mcrr" : "mrrc",
+				p->Op1, p->Rt1, p->Rt2, p->CRm);
+	} else {
+		kvm_msg("    %s\tp15, %u, r%u, c%u, c%u, %u",
+				(p->is_write) ? "mcr" : "mrc",
+				p->Op1, p->Rt1, p->CRn, p->CRm, p->Op2);
+	}
+}
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+/**
+ * emulate_cp15_c10_access -- emulates cp15 accesses for CRn == 10
+ * @vcpu: The VCPU pointer
+ * @p:    The coprocessor parameters struct pointer holding trap inst. details
+ *
+ * This function may not need to exist - if we can ignore guest attempts to
+ * tamper with TLB lockdowns then it should be enough to store/restore the
+ * host/guest PRRR and NMRR memory remap registers and allow guest direct access
+ * to these registers.
+ */
+static int emulate_cp15_c10_access(struct kvm_vcpu *vcpu,
+				   struct coproc_params *p)
+{
+	BUG_ON(p->CRn != 10);
+	BUG_ON(p->is_64bit);
+
+	if ((p->CRm == 0 || p->CRm == 1 || p->CRm == 4 || p->CRm == 8) &&
+	    (p->Op2 <= 7)) {
+		/* TLB Lockdown operations - ignored */
+		return 0;
+	}
+
+	if (p->CRm == 2 && p->Op2 == 0) {
+		CP15_OP(vcpu, p, c10_PRRR);
+		return 0;
+	}
+
+	if (p->CRm == 2 && p->Op2 == 1) {
+		CP15_OP(vcpu, p, c10_NMRR);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * emulate_cp15_c15_access -- emulates cp15 accesses for CRn == 15
+ * @vcpu: The VCPU pointer
+ * @p:    The coprocessor parameters struct pointer holding trap inst. details
+ *
+ * The CP15 c15 register is implementation defined, but some guest kernels
+ * attempt to read/write a diagnostics register here. We always return 0 and
+ * ignore writes and hope for the best. This may need to be refined.
+ */
+static int emulate_cp15_c15_access(struct kvm_vcpu *vcpu,
+				   struct coproc_params *p)
+{
+	trace_kvm_emulate_cp15_imp(p->Op1, p->Rt1, p->CRn, p->CRm,
+				   p->Op2, p->is_write);
+
+	if (!p->is_write)
+		*vcpu_reg(vcpu, p->Rt1) = 0;
+
+	return 0;
+}
+
+/**
+ * kvm_handle_cp15_access -- handles a trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ *
+ * Investigates the CRn/CRm and whether this was an mcr/mrc or mcrr/mrrc and
+ * either simply errors out if the operation is not supported (should we maybe
+ * raise undefined to the guest instead?) or otherwise emulates the access.
+ */
+int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long hsr_ec, instr_len;
+	struct coproc_params params;
+	int ret = 0;
+
+	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
+	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+	BUG_ON(params.Rt1 >= 15);
+	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.is_64bit = (hsr_ec == HSR_EC_CP15_64);
+
+	if (params.is_64bit) {
+		/* mrrc, mcrr operation */
+		params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+		params.Op2 = 0;
+		params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+		BUG_ON(params.Rt2 >= 15);
+		params.CRn = 0;
+	} else {
+		params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+		params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+		params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+		params.Rt2 = 0;
+	}
+
+	/* So far no mrrc/mcrr accesses are emulated */
+	if (params.is_64bit)
+		goto unsupp_err_out;
+
+	switch (params.CRn) {
+	case 10:
+		ret = emulate_cp15_c10_access(vcpu, &params);
+		break;
+	case 15:
+		ret = emulate_cp15_c15_access(vcpu, &params);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		goto unsupp_err_out;
+
+	/* Skip instruction, since it was emulated */
+	instr_len = ((vcpu->arch.hsr >> 25) & 1) ? 4 : 2;
+	*vcpu_reg(vcpu, 15) += instr_len;
+
+	return ret;
+unsupp_err_out:
+	kvm_msg("Unsupported guest CP15 access at: %08x", vcpu->arch.regs.pc);
+	print_cp_instr(&params);
+	return -EINVAL;
+}
+
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	return 0;
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index ac64e3a..381ea4a 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -39,6 +39,34 @@ TRACE_EVENT(kvm_exit,
 	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
 );
 
+TRACE_EVENT(kvm_emulate_cp15_imp,
+	TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
+		 unsigned long CRm, unsigned long Op2, bool is_write),
+	TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	Op1		)
+		__field(	unsigned int,	Rt1		)
+		__field(	unsigned int,	CRn		)
+		__field(	unsigned int,	CRm		)
+		__field(	unsigned int,	Op2		)
+		__field(	bool,		is_write	)
+	),
+
+	TP_fast_assign(
+		__entry->is_write		= is_write;
+		__entry->Op1			= Op1;
+		__entry->Rt1			= Rt1;
+		__entry->CRn			= CRn;
+		__entry->CRm			= CRm;
+		__entry->Op2			= Op2;
+	),
+
+	TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
+			(__entry->is_write) ? "mcr" : "mrc",
+			__entry->Op1, __entry->Rt1, __entry->CRn,
+			__entry->CRm, __entry->Op2)
+);
 
 TRACE_EVENT(kvm_irq_line,
 	TP_PROTO(unsigned int type, unsigned int level, unsigned int vcpu_idx),
