[PATCH 7/7] ARM: KVM: move MMIO handling to its own files

MMIO handling is currently tangled between mmu.c and emulate.c.

Move HSR-based handling to mmio.c, and anything related to instruction
emulation (including the instruction readback from guest memory) into
emulate.c.

In the end:
- mmu.c: only deals with page tables and defers to mmio.c for anything
  else
- mmio.c: only deals with valid HSR faults, and defers to emulate.c
  for anything else
- emulate.c: only cares about instruction emulation
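
To illustrate the layering, here is a rough sketch of the resulting
call flow (illustration only, not part of the diff; it assumes the
fault is routed through the existing kvm_handle_guest_abort() path
in mmu.c):

	/* mmu.c: a 2nd stage fault on an I/O address is deferred to mmio.c */
	ret = io_mem_abort(vcpu, run, fault_ipa, memslot);

	/* mmio.c: decode the HSR when it is valid, defer to emulate.c
	 * for the invalid (!HSR_ISV) case */
	if (vcpu->arch.hsr & HSR_ISV)
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
	else
		ret = kvm_emulate_mmio_ls(vcpu, fault_ipa, &mmio);

	/* emulate.c: kvm_emulate_mmio_ls() now fetches the faulting
	 * instruction itself (copy_current_insn) before decoding it */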

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/include/asm/kvm_emulate.h |  24 +---
 arch/arm/include/asm/kvm_mmio.h    |  51 +++++++
 arch/arm/include/asm/kvm_mmu.h     |   1 -
 arch/arm/kvm/Makefile              |   2 +-
 arch/arm/kvm/emulate.c             | 126 ++++++++++++++++-
 arch/arm/kvm/mmio.c                | 155 +++++++++++++++++++++
 arch/arm/kvm/mmu.c                 | 269 +------------------------------------
 7 files changed, 334 insertions(+), 294 deletions(-)
 create mode 100644 arch/arm/include/asm/kvm_mmio.h
 create mode 100644 arch/arm/kvm/mmio.c

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 304b5c7..2e2ca19 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -21,34 +21,14 @@
 
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
-
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-	phys_addr_t	phys_addr;
-	u8		data[8];
-	u32		len;
-	bool		is_write;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-				    struct kvm_exit_mmio *mmio)
-{
-	run->mmio.phys_addr	= mmio->phys_addr;
-	run->mmio.len		= mmio->len;
-	run->mmio.is_write	= mmio->is_write;
-	memcpy(run->mmio.data, mmio->data, mmio->len);
-	run->exit_reason	= KVM_EXIT_MMIO;
-}
+#include <asm/kvm_mmio.h>
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
 int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-			unsigned long instr, struct kvm_exit_mmio *mmio);
+			struct kvm_exit_mmio *mmio);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 0000000..31ab9f5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+	phys_addr_t	phys_addr;
+	u8		data[8];
+	u32		len;
+	bool		is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+				    struct kvm_exit_mmio *mmio)
+{
+	run->mmio.phys_addr	= mmio->phys_addr;
+	run->mmio.len		= mmio->len;
+	run->mmio.is_write	= mmio->is_write;
+	memcpy(run->mmio.data, mmio->data, mmio->len);
+	run->exit_reason	= KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		 phys_addr_t fault_ipa, struct kvm_memory_slot *memslot);
+
+#endif	/* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index ecfaaf0..9bd0508 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -38,7 +38,6 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 8f4aa02..6b19e5c 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,6 +19,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += init.o interrupts.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 obj-$(CONFIG_KVM_ARM_HOST) += arm.o guest.o mmu.o emulate.o reset.o
-obj-$(CONFIG_KVM_ARM_HOST) += coproc.o coproc_a15.o
+obj-$(CONFIG_KVM_ARM_HOST) += coproc.o coproc_a15.o mmio.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += timer.o
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index dff98a3..93c9b07 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -170,6 +170,118 @@ int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static u64 kvm_va_to_pa(struct kvm_vcpu *vcpu, u32 va, bool priv)
+{
+	return kvm_call_hyp(__kvm_va_to_pa, vcpu, va, priv);
+}
+
+/**
+ * copy_from_guest_va - copy memory from guest (very slow!)
+ * @vcpu:	vcpu pointer
+ * @dest:	memory to copy into
+ * @gva:	virtual address in guest to copy from
+ * @len:	length to copy
+ * @priv:	use guest PL1 (i.e. kernel) mappings,
+ *              otherwise use guest PL0 mappings.
+ *
+ * Returns true on success, false on failure (unlikely, but retry).
+ */
+static bool copy_from_guest_va(struct kvm_vcpu *vcpu,
+			       void *dest, unsigned long gva, size_t len,
+			       bool priv)
+{
+	u64 par;
+	phys_addr_t pc_ipa;
+	int err;
+
+	BUG_ON((gva & PAGE_MASK) != ((gva + len) & PAGE_MASK));
+	par = kvm_va_to_pa(vcpu, gva & PAGE_MASK, priv);
+	if (par & 1) {
+		kvm_err("IO abort from invalid instruction address"
+			" %#lx!\n", gva);
+		return false;
+	}
+
+	BUG_ON(!(par & (1U << 11)));
+	pc_ipa = par & PAGE_MASK & ((1ULL << 32) - 1);
+	pc_ipa += gva & ~PAGE_MASK;
+
+
+	err = kvm_read_guest(vcpu->kvm, pc_ipa, dest, len);
+	if (unlikely(err))
+		return false;
+
+	return true;
+}
+
+/* Just ensure we're not running the guest. */
+static void do_nothing(void *info)
+{
+}
+
+/*
+ * We have to be very careful copying memory from a running (i.e. SMP) guest.
+ * Another CPU may remap the page (e.g. swap out a userspace text page) as we
+ * read the instruction.  Unlike normal hardware operation, emulating an
+ * instruction means mapping the virtual address to a physical one and then
+ * reading that memory as two separate, non-atomic steps.
+ *
+ * Fortunately this is so rare (we don't usually need the instruction) that
+ * we can go very slowly and no one will mind.
+ */
+static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
+{
+	int i;
+	bool ret;
+	struct kvm_vcpu *v;
+	bool is_thumb;
+	size_t instr_len;
+
+	/* Don't cross with IPIs in kvm_main.c */
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	/* Tell them all to pause, so no more will enter guest. */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		v->arch.pause = true;
+
+	/* Set ->pause before we read ->mode */
+	smp_mb();
+
+	/* Kick out any which are still running. */
+	kvm_for_each_vcpu(i, v, vcpu->kvm) {
+		/* Guest could exit now, making cpu wrong. That's OK. */
+		if (kvm_vcpu_exiting_guest_mode(v) == IN_GUEST_MODE)
+			smp_call_function_single(v->cpu, do_nothing, NULL, 1);
+	}
+
+
+	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
+	instr_len = (is_thumb) ? 2 : 4;
+
+	BUG_ON(!is_thumb && *vcpu_pc(vcpu) & 0x3);
+
+	/* Now guest isn't running, we can va->pa map and copy atomically. */
+	ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu), instr_len,
+				 vcpu_mode_priv(vcpu));
+	if (!ret)
+		goto out;
+
+	/* A 32-bit thumb2 instruction can actually go over a page boundary! */
+	if (is_thumb && is_wide_instruction(*instr)) {
+		*instr = *instr << 16;
+		ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu) + 2, 2,
+					 vcpu_mode_priv(vcpu));
+	}
+
+out:
+	/* Release them all. */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		v->arch.pause = false;
+
+	spin_unlock(&vcpu->kvm->mmu_lock);
+
+	return ret;
+}
 
 /******************************************************************************
  * Load-Store instruction emulation
@@ -575,7 +687,12 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
  * kvm_emulate_mmio_ls - emulates load/store instructions made to I/O memory
  * @vcpu:	The vcpu pointer
  * @fault_ipa:	The IPA that caused the 2nd stage fault
- * @instr:	The instruction that caused the fault
+ * @mmio:	Pointer to struct to hold decode information
+ *
+ * Some load/store instructions cannot be emulated using the information
+ * presented in the HSR, for instance, register write-back instructions are not
+ * supported. We therefore need to fetch the instruction, decode it, and then
+ * emulate its behavior.
  *
  * Handles emulation of load/store instructions which cannot be emulated through
  * information found in the HSR on faults. It is necessary in this case to
@@ -583,12 +700,17 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
  * required operands.
  */
 int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-			unsigned long instr, struct kvm_exit_mmio *mmio)
+			struct kvm_exit_mmio *mmio)
 {
 	bool is_thumb;
+	unsigned long instr = 0;
 
 	trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
 
+	/* If it fails (SMP race?), we reenter the guest for it to retry. */
+	if (!copy_current_insn(vcpu, &instr))
+		return 1;
+
 	mmio->phys_addr = fault_ipa;
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
 	if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, mmio)) {
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
new file mode 100644
index 0000000..44afad4
--- /dev/null
+++ b/arch/arm/kvm/mmio.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+/**
+ * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ *
+ * This should only be called after returning from userspace for MMIO load
+ * emulation.
+ */
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long *dest;
+	unsigned int len;
+	int mask;
+
+	if (!run->mmio.is_write) {
+		dest = vcpu_reg(vcpu, vcpu->arch.mmio.rd);
+		memset(dest, 0, sizeof(int));
+
+		len = run->mmio.len;
+		if (len > 4)
+			return -EINVAL;
+
+		memcpy(dest, run->mmio.data, len);
+
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+				*((u64 *)run->mmio.data));
+
+		if (vcpu->arch.mmio.sign_extend && len < 4) {
+			mask = 1U << ((len * 8) - 1);
+			*dest = (*dest ^ mask) - mask;
+		}
+	}
+
+	return 0;
+}
+
+static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+		      struct kvm_exit_mmio *mmio)
+{
+	unsigned long rd, len;
+	bool is_write, sign_extend;
+
+	if ((vcpu->arch.hsr >> 8) & 1) {
+		/* cache operation on I/O addr, tell guest unsupported */
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		return 1;
+	}
+
+	if ((vcpu->arch.hsr >> 7) & 1) {
+		/* page table accesses IO mem: tell guest to fix its TTBR */
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		return 1;
+	}
+
+	switch ((vcpu->arch.hsr >> 22) & 0x3) {
+	case 0:
+		len = 1;
+		break;
+	case 1:
+		len = 2;
+		break;
+	case 2:
+		len = 4;
+		break;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+
+	is_write = vcpu->arch.hsr & HSR_WNR;
+	sign_extend = vcpu->arch.hsr & HSR_SSE;
+	rd = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+
+	if (kvm_vcpu_reg_is_pc(vcpu, rd)) {
+		/* IO memory trying to read/write pc */
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		return 1;
+	}
+
+	mmio->is_write = is_write;
+	mmio->phys_addr = fault_ipa;
+	mmio->len = len;
+	vcpu->arch.mmio.sign_extend = sign_extend;
+	vcpu->arch.mmio.rd = rd;
+
+	/*
+	 * The MMIO instruction is emulated and should not be re-executed
+	 * in the guest.
+	 */
+	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	return 0;
+}
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		 phys_addr_t fault_ipa, struct kvm_memory_slot *memslot)
+{
+	struct kvm_exit_mmio mmio;
+	unsigned long rd;
+	int ret;
+
+	/*
+	 * Prepare MMIO operation. First stash it in a private
+	 * structure that we can use for in-kernel emulation. If the
+	 * kernel can't handle it, copy it into run->mmio and let user
+	 * space do its magic.
+	 */
+
+	if (vcpu->arch.hsr & HSR_ISV)
+		ret = decode_hsr(vcpu, fault_ipa, &mmio);
+	else
+		ret = kvm_emulate_mmio_ls(vcpu, fault_ipa, &mmio);
+
+	if (ret != 0)
+		return ret;
+
+	rd = vcpu->arch.mmio.rd;
+	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
+					 KVM_TRACE_MMIO_READ_UNSATISFIED,
+			mmio.len, fault_ipa,
+			(mmio.is_write) ? *vcpu_reg(vcpu, rd) : 0);
+
+	if (mmio.is_write)
+		memcpy(mmio.data, vcpu_reg(vcpu, rd), mmio.len);
+
+	if (vgic_handle_mmio(vcpu, run, &mmio))
+		return 1;
+
+	kvm_prepare_mmio(run, &mmio);
+	return 0;
+}
+
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 59c24e3..0ab098e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -25,6 +25,7 @@
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/mach/map.h>
@@ -580,274 +581,6 @@ out_unlock:
 }
 
 /**
- * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
- * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
- */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	unsigned long *dest;
-	unsigned int len;
-	int mask;
-
-	if (!run->mmio.is_write) {
-		dest = vcpu_reg(vcpu, vcpu->arch.mmio.rd);
-		memset(dest, 0, sizeof(int));
-
-		len = run->mmio.len;
-		if (len > 4)
-			return -EINVAL;
-
-		memcpy(dest, run->mmio.data, len);
-
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-				*((u64 *)run->mmio.data));
-
-		if (vcpu->arch.mmio.sign_extend && len < 4) {
-			mask = 1U << ((len * 8) - 1);
-			*dest = (*dest ^ mask) - mask;
-		}
-	}
-
-	return 0;
-}
-
-static u64 kvm_va_to_pa(struct kvm_vcpu *vcpu, u32 va, bool priv)
-{
-	return kvm_call_hyp(__kvm_va_to_pa, vcpu, va, priv);
-}
-
-/**
- * copy_from_guest_va - copy memory from guest (very slow!)
- * @vcpu:	vcpu pointer
- * @dest:	memory to copy into
- * @gva:	virtual address in guest to copy from
- * @len:	length to copy
- * @priv:	use guest PL1 (ie. kernel) mappings
- *              otherwise use guest PL0 mappings.
- *
- * Returns true on success, false on failure (unlikely, but retry).
- */
-static bool copy_from_guest_va(struct kvm_vcpu *vcpu,
-			       void *dest, unsigned long gva, size_t len,
-			       bool priv)
-{
-	u64 par;
-	phys_addr_t pc_ipa;
-	int err;
-
-	BUG_ON((gva & PAGE_MASK) != ((gva + len) & PAGE_MASK));
-	par = kvm_va_to_pa(vcpu, gva & PAGE_MASK, priv);
-	if (par & 1) {
-		kvm_err("IO abort from invalid instruction address"
-			" %#lx!\n", gva);
-		return false;
-	}
-
-	BUG_ON(!(par & (1U << 11)));
-	pc_ipa = par & PAGE_MASK & ((1ULL << 32) - 1);
-	pc_ipa += gva & ~PAGE_MASK;
-
-
-	err = kvm_read_guest(vcpu->kvm, pc_ipa, dest, len);
-	if (unlikely(err))
-		return false;
-
-	return true;
-}
-
-/* Just ensure we're not running the guest. */
-static void do_nothing(void *info)
-{
-}
-
-/*
- * We have to be very careful copying memory from a running (ie. SMP) guest.
- * Another CPU may remap the page (eg. swap out a userspace text page) as we
- * read the instruction.  Unlike normal hardware operation, to emulate an
- * instruction we map the virtual to physical address then read that memory
- * as separate steps, thus not atomic.
- *
- * Fortunately this is so rare (we don't usually need the instruction), we
- * can go very slowly and noone will mind.
- */
-static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
-{
-	int i;
-	bool ret;
-	struct kvm_vcpu *v;
-	bool is_thumb;
-	size_t instr_len;
-
-	/* Don't cross with IPIs in kvm_main.c */
-	spin_lock(&vcpu->kvm->mmu_lock);
-
-	/* Tell them all to pause, so no more will enter guest. */
-	kvm_for_each_vcpu(i, v, vcpu->kvm)
-		v->arch.pause = true;
-
-	/* Set ->pause before we read ->mode */
-	smp_mb();
-
-	/* Kick out any which are still running. */
-	kvm_for_each_vcpu(i, v, vcpu->kvm) {
-		/* Guest could exit now, making cpu wrong. That's OK. */
-		if (kvm_vcpu_exiting_guest_mode(v) == IN_GUEST_MODE)
-			smp_call_function_single(v->cpu, do_nothing, NULL, 1);
-	}
-
-
-	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-	instr_len = (is_thumb) ? 2 : 4;
-
-	BUG_ON(!is_thumb && *vcpu_pc(vcpu) & 0x3);
-
-	/* Now guest isn't running, we can va->pa map and copy atomically. */
-	ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu), instr_len,
-				 vcpu_mode_priv(vcpu));
-	if (!ret)
-		goto out;
-
-	/* A 32-bit thumb2 instruction can actually go over a page boundary! */
-	if (is_thumb && is_wide_instruction(*instr)) {
-		*instr = *instr << 16;
-		ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu) + 2, 2,
-					 vcpu_mode_priv(vcpu));
-	}
-
-out:
-	/* Release them all. */
-	kvm_for_each_vcpu(i, v, vcpu->kvm)
-		v->arch.pause = false;
-
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return ret;
-}
-
-/**
- * invalid_io_mem_abort -- Handle I/O aborts ISV bit is clear
- *
- * @vcpu:      The vcpu pointer
- * @fault_ipa: The IPA that caused the 2nd stage fault
- * @mmio:      Pointer to struct to hold decode information
- *
- * Some load/store instructions cannot be emulated using the information
- * presented in the HSR, for instance, register write-back instructions are not
- * supported. We therefore need to fetch the instruction, decode it, and then
- * emulate its behavior.
- */
-static int invalid_io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-				struct kvm_exit_mmio *mmio)
-{
-	unsigned long instr = 0;
-
-	/* If it fails (SMP race?), we reenter guest for it to retry. */
-	if (!copy_current_insn(vcpu, &instr))
-		return 1;
-
-	return kvm_emulate_mmio_ls(vcpu, fault_ipa, instr, mmio);
-}
-
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-		      struct kvm_exit_mmio *mmio)
-{
-	unsigned long rd, len;
-	bool is_write, sign_extend;
-
-	if ((vcpu->arch.hsr >> 8) & 1) {
-		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
-		return 1;
-	}
-
-	if ((vcpu->arch.hsr >> 7) & 1) {
-		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
-		return 1;
-	}
-
-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
-	case 0:
-		len = 1;
-		break;
-	case 1:
-		len = 2;
-		break;
-	case 2:
-		len = 4;
-		break;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
-	}
-
-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rd = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-
-	if (kvm_vcpu_reg_is_pc(vcpu, rd)) {
-		/* IO memory trying to read/write pc */
-		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
-		return 1;
-	}
-
-	mmio->is_write = is_write;
-	mmio->phys_addr = fault_ipa;
-	mmio->len = len;
-	vcpu->arch.mmio.sign_extend = sign_extend;
-	vcpu->arch.mmio.rd = rd;
-
-	/*
-	 * The MMIO instruction is emulated and should not be re-executed
-	 * in the guest.
-	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
-	return 0;
-}
-
-static int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			phys_addr_t fault_ipa, struct kvm_memory_slot *memslot)
-{
-	struct kvm_exit_mmio mmio;
-	unsigned long rd;
-	int ret;
-
-	/*
-	 * Prepare MMIO operation. First stash it in a private
-	 * structure that we can use for in-kernel emulation. If the
-	 * kernel can't handle it, copy it into run->mmio and let user
-	 * space do its magic.
-	 */
-
-	if (vcpu->arch.hsr & HSR_ISV)
-		ret = decode_hsr(vcpu, fault_ipa, &mmio);
-	else
-		ret = invalid_io_mem_abort(vcpu, fault_ipa, &mmio);
-
-	if (ret != 0)
-		return ret;
-
-	rd = vcpu->arch.mmio.rd;
-	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
-					 KVM_TRACE_MMIO_READ_UNSATISFIED,
-			mmio.len, fault_ipa,
-			(mmio.is_write) ? *vcpu_reg(vcpu, rd) : 0);
-
-	if (mmio.is_write)
-		memcpy(mmio.data, vcpu_reg(vcpu, rd), mmio.len);
-
-	if (vgic_handle_mmio(vcpu, run, &mmio))
-		return 1;
-
-	kvm_prepare_mmio(run, &mmio);
-	return 0;
-}
-
-/**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
  * @run:	the kvm_run structure
-- 
1.7.12


