[Android-virt] [PATCH] ARM: KVM: Monitor-Hypervisor API

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi Catalin.

This patch contains the KVM code including the simple version of the
initialization API that we discussed.

Perhaps you can give the function kvm_arch_hardware_enable() in
arch/arm/kvm/arm.c and the content in arch/arm/kvm/arm_interrupts.S an early
review if you have time.

Best,
	Christoffer

---
diff --git a/arch/arm/include/asm/kvm.h b/arch/arm/include/asm/kvm.h
new file mode 100644
index 0000000..3135622
--- /dev/null
+++ b/arch/arm/include/asm/kvm.h
@@ -0,0 +1,66 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#include <asm/types.h>
+
+/*
+ * Modes used for short-hand mode determination in the world-switch code and
+ * in emulation code.
+ *
+ * Note: These indices do NOT correspond to the value of the CPSR mode bits!
+ */
+#define MODE_FIQ     0
+#define MODE_IRQ     1
+#define MODE_SVC     2
+#define MODE_ABORT   3
+#define MODE_UNDEF   4
+#define MODE_USER    5
+#define MODE_SYSTEM  6
+
+struct kvm_regs {
+	__u32 regs0_7[8];	/* Unbanked regs. (r0 - r7)	   */
+	__u32 fiq_regs8_12[5];	/* Banked fiq regs. (r8 - r12)	   */
+	__u32 usr_regs8_12[5];	/* Banked usr registers (r8 - r12) */
+	__u32 reg13[6];		/* Banked r13, indexed by MODE_	   */
+	__u32 reg14[6];		/* Banked r14, indexed by MODE_	   */
+	__u32 reg15;
+	__u32 cpsr;
+	__u32 spsr[5];		/* Banked SPSR,  indexed by MODE_  */
+	struct {
+		__u32 c0_cpuid;
+		__u32 c2_base0;
+		__u32 c2_base1;
+		__u32 c3;
+	} cp15;
+
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+#endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 0000000..3bb3ef1
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __KVM_ARM_H__
+#define __KVM_ARM_H__
+
+#include <asm/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE		(1 << 27)
+#define HCR_TVM		(1 << 26)
+#define HCR_TTLB	(1 << 25)
+#define HCR_TPU		(1 << 24)
+#define HCR_TPC		(1 << 23)
+#define HCR_TSW		(1 << 22)
+#define HCR_TAC		(1 << 21)
+#define HCR_TIDCP	(1 << 20)
+#define HCR_TSC		(1 << 19)
+#define HCR_TID3	(1 << 18)
+#define HCR_TID2	(1 << 17)
+#define HCR_TID1	(1 << 16)
+#define HCR_TID0	(1 << 15)
+#define HCR_TWE		(1 << 14)
+#define HCR_TWI		(1 << 13)
+#define HCR_DC		(1 << 12)
+#define HCR_BSU		(3 << 10)
+#define HCR_FB		(1 << 9)
+#define HCR_VA		(1 << 8)
+#define HCR_VI		(1 << 7)
+#define HCR_VF		(1 << 6)
+#define HCR_AMO		(1 << 5)
+#define HCR_IMO		(1 << 4)
+#define HCR_FMO		(1 << 3)
+#define HCR_PTW		(1 << 2)
+#define HCR_SWIO	(1 << 1)
+#define HCR_VM		1
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE	(1 << 30)
+#define HSCTLR_EE	(1 << 25)
+#define HSCTLR_FI	(1 << 21)
+#define HSCTLR_WXN	(1 << 19)
+#define HSCTLR_I	(1 << 12)
+#define HSCTLR_C	(1 << 2)
+#define HSCTLR_A	(1 << 1)
+#define HSCTLR_M	1
+#define HSCTLR_MASK	(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+			 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE	(1 << 31)
+#define TTBCR_IMP	(1 << 30)
+#define TTBCR_SH1	(3 << 28)
+#define TTBCR_ORGN1	(3 << 26)
+#define TTBCR_IRGN1	(3 << 24)
+#define TTBCR_EPD1	(1 << 23)
+#define TTBCR_A1	(1 << 22)
+#define TTBCR_T1SZ	(3 << 16)
+#define TTBCR_SH0	(3 << 12)
+#define TTBCR_ORGN0	(3 << 10)
+#define TTBCR_IRGN0	(3 << 8)
+#define TTBCR_EPD0	(1 << 7)
+#define TTBCR_T0SZ	3
+#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Change the current processor mode to Hyp mode.
+ * You should never return to user space or enable interrupts before calling
+ * kvm_arm_hyp_return.
+ */
+static inline void kvm_arm_hyp_mode(void)
+{
+	__asm__ (
+		"push	{lr}\n\t"
+		"hvc	#0\n\t"
+		"pop	{lr}\n\t");
+}
+
+/*
+ * Return from Hyp mode to Svc mode.
+ */
+static inline void kvm_arm_hyp_return(void)
+{
+	__asm__ (
+		"push	{lr}\n\t"
+		"hvc	#0\n\t"
+		"pop	{lr}\n\t");
+}
+
+#endif
+
+#endif /* __KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 0000000..99991b4
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+#define ARM_EXCEPTION_RESET	  0
+#define ARM_EXCEPTION_UNDEFINED   1
+#define ARM_EXCEPTION_SOFTWARE    2
+#define ARM_EXCEPTION_PREF_ABORT  3
+#define ARM_EXCEPTION_DATA_ABORT  4
+#define ARM_EXCEPTION_IRQ	  5
+#define ARM_EXCEPTION_FIQ	  6
+#define ARM_EXCEPTION_HVC	  7
+
+/*
+ * SMC Hypervisor API call numbers
+ */
+#ifdef __ASSEMBLY__
+#define SMC_HYP_CALL(n, x) .equ n, x
+#else /* !__ASSEMBLY__ */
+#define SMC_HYP_CALL(n, x) asm(".equ " #n ", " #x);
+#endif /* __ASSEMBLY__ */
+
+SMC_HYP_CALL(SMCHYP_HVBAR_W  , 0xfffffff0)
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 0000000..8d727a6
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,77 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+
+u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+
+/*
+ * Helper function to do what's needed when switching modes
+ */
+static inline int kvm_switch_mode(struct kvm_vcpu *vcpu, u8 new_cpsr)
+{
+	u8 new_mode;
+	u8 old_mode = VCPU_MODE(vcpu);
+	int ret = 0;
+
+	u8 modes_table[16] = {
+		MODE_USER,	// 0x0
+		MODE_FIQ,	// 0x1
+		MODE_IRQ,	// 0x2
+		MODE_SVC,	// 0x3
+		0xf, 0xf, 0xf,
+		MODE_ABORT,	// 0x7
+		0xf, 0xf, 0xf,
+		MODE_UNDEF,	// 0xb
+		0xf, 0xf, 0xf,
+		MODE_SYSTEM};	// 0xf
+
+	new_mode = modes_table[new_cpsr & 0xf];
+	BUG_ON(new_mode == 0xf);
+
+	if (new_mode == old_mode)
+		return 0;
+
+	// TODO: Check this for Virt-Ext implementation
+	if (new_mode == MODE_USER || old_mode == MODE_USER) {
+		/* Switch btw. priv. and non-priv. */
+		//ret = kvm_init_l1_shadow(vcpu, vcpu->arch.shadow_pgtable->pgd);
+	}
+	//vcpu->arch.shared_page->vcpu_mode = new_mode;
+
+	return ret;
+}
+
+/*
+ * Write to the virtual CPSR.
+ * The CPSR should NEVER be written directly!
+ */
+static inline void kvm_cpsr_write(struct kvm_vcpu *vcpu, u32 new_cpsr)
+{
+	if ((new_cpsr & MODE_MASK) != (vcpu->arch.regs.cpsr & MODE_MASK)) {
+		BUG_ON(kvm_switch_mode(vcpu, new_cpsr));
+	}
+
+	BUG_ON((new_cpsr & PSR_N_BIT) && (new_cpsr & PSR_Z_BIT));
+
+	vcpu->arch.regs.cpsr = new_cpsr;
+}
+
+#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 0000000..20bc7dc
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,181 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_HOST_H__
+#define __ARM_KVM_HOST_H__
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x)	0
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
+
+struct kvm_vcpu;
+u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+
+struct kvm_arch {
+	pgd_t *pgd;	/* 1-level 2nd stage table */
+};
+
+#define VCPU_MODE(_vcpu) \
+	(*((_vcpu)->arch.mode))
+
+/* Get vcpu register for current mode */
+#define vcpu_reg(_vcpu, _reg_num) \
+	(*kvm_vcpu_reg((_vcpu), _reg_num, VCPU_MODE(_vcpu)))
+
+/* Get vcpu register for specific mode */
+#define vcpu_reg_m(_vcpu, _reg_num, _mode) \
+	(*kvm_vcpu_reg(_vcpu, _reg_num, _mode))
+
+#define vcpu_cpsr(_vcpu) \
+	(_vcpu->arch.regs.cpsr)
+
+/* Get vcpu SPSR for current mode */
+#define vcpu_spsr(_vcpu) \
+	(_vcpu->arch.regs.spsr[VCPU_MODE(_vcpu)])
+
+/* Get vcpu SPSR for specific mode */
+#define vcpu_spsr_m(_vcpu, _mode) \
+	(_vcpu->arch.regs.spsr[_mode])
+
+#define MODE_HAS_SPSR(_vcpu) \
+	 ((VCPU_MODE(_vcpu)) < MODE_USER)
+
+#define VCPU_MODE_PRIV(_vcpu) \
+	(((VCPU_MODE(_vcpu)) == MODE_USER) ? 0 : 1)
+
+#define EXCEPTION_NONE      0
+#define EXCEPTION_RESET     0x80
+#define EXCEPTION_UNDEFINED 0x40
+#define EXCEPTION_SOFTWARE  0x20
+#define EXCEPTION_PREFETCH  0x10
+#define EXCEPTION_DATA      0x08
+#define EXCEPTION_IMPRECISE 0x04
+#define EXCEPTION_IRQ       0x02
+#define EXCEPTION_FIQ       0x01
+
+struct kvm_vcpu_regs {
+	u32 fiq_reg[5];		/* FIQ  Mode r8-r12 */
+	u32 usr_reg[5];		/* USER Mode r8-r12 */
+	u32 banked_fiq[2];	/* FIQ r13,r14 */
+	u32 banked_irq[2];	/* IRQ r13,r14 */
+	u32 banked_svc[2];	/* SVC r13,r14 */
+	u32 banked_abt[2];	/* ABORT r13,r14 */
+	u32 banked_und[2];	/* UNDEFINED r13,r14 */
+	u32 banked_usr[2];	/* USER r13,r14 */
+	u32 shared_reg[8];	/* Shared r0-r7 */
+	u32 r15;		/* r15 */
+	u32 cpsr;		/* Guest emulated CPSR */
+	u32 spsr[5];		/* Guest SPSR per-mode */
+} __packed;
+
+struct kvm_vcpu_arch {
+	/* Pointer to regs struct on shared page */
+	struct kvm_vcpu_regs regs;
+
+	/* Pointer to cached mode on shared page */
+	unsigned long *mode;
+
+	/* System control coprocessor (cp15) */
+	struct {
+		u32 c0_MIDR;		/* Main ID Register */
+		u32 c0_CTR;		/* Cache Type Register */
+		u32 c0_TCMTR;   	/* Tightly Coupled Memory Type Register */
+		u32 c0_TLBTR;   	/* TLB Type Register */
+		u32 c1_CR;		/* Control Register */
+		u32 c1_ACR;		/* Auxiliary Control Register */
+		u32 c1_CAR;		/* Coprocessor Access Register */
+		u32 c2_TTBR0;		/* Translation Table Base Register 0 */
+		u32 c2_TTBR1;		/* Translation Table Base Register 1 */
+		u32 c2_TTBR_CR;		/* Translation Table Base Register Control */
+		u32 c3_DACR;		/* Domain Access Control Register */
+		u32 c5_DFSR;		/* Fault Status Register */
+		u32 c5_IFSR;		/* Fault Status Register */
+		u32 c6_FAR;		/* Fault Address Register */
+		u32 c7_CDSR;		/* Cache Dirty Status Register */
+		u32 c7_RBTSR;		/* Read Block Transfer Status Register */
+		u32 c9_DCLR;		/* Data Cache Lockdown Register */
+		u32 c9_ICLR;		/* Instruction Cache Lockdown Register */
+		u32 c9_DTCMR;		/* Data TCM Region */
+		u32 c9_ITCMR;		/* Instruction TCM Region */
+		u32 c10_TLBLR;		/* TLB Lockdown Register */
+		u32 c13_FCSER;		/* Fast Context Switch Extension Register */
+		u32 c13_CID;		/* Context ID Register */
+		u32 c13_TIDURW;		/* User Read/Write Thread and Process ID */
+		u32 c13_TIDURO;		/* User Read-only Thread and Process ID */
+		u32 c13_TIDPO;		/* Privileged only Thread and Process ID */
+	} cp15;
+
+	u32 guest_exception;  		/* Hardware exception that exited the guest */
+	u32 exception_pending;  	/* Exception to raise after emulation */
+
+	/* Host status */
+	u32 host_far;		/* Fault access register */
+	u32 host_fsr;		/* Fault status register */
+	u32 host_ifsr;		/* Fault status register */
+
+	/* IO related fields */
+	u32 mmio_rd;
+
+	/* Misc. fields */
+	u32 wait_for_interrupts;
+
+	struct kvm_run *kvm_run;
+};
+
+struct kvm_vm_stat {
+	u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+	u32 sum_exits;
+	u32 mmio_exits;
+	u32 dcr_exits;
+	u32 signal_exits;
+	u32 light_exits;
+	/* Account for special types of light exits: */
+	u32 itlb_real_miss_exits;
+	u32 itlb_virt_miss_exits;
+	u32 dtlb_real_miss_exits;
+	u32 dtlb_virt_miss_exits;
+	u32 syscall_exits;
+	u32 isi_exits;
+	u32 dsi_exits;
+	u32 emulated_inst_exits;
+	u32 dec_exits;
+	u32 ext_intr_exits;
+	u32 halt_wakeup;
+};
+
+static inline gpa_t kvm_guest_ttbr(struct kvm_vcpu_arch *vcpu_arch, gva_t gva)
+{
+	unsigned int n = 0;
+
+	BUG_ON(vcpu_arch->cp15.c2_TTBR_CR & ~0x7);
+	n = vcpu_arch->cp15.c2_TTBR_CR & 0x7;
+
+	if (n != 0 && (gva >> (32-n)) == 0)
+		return vcpu_arch->cp15.c2_TTBR1 & (~0 << 14);
+
+	return vcpu_arch->cp15.c2_TTBR0 & (~0 << (14 - n));
+}
+
+#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 0000000..06853b6
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_PGD2 could therefore be 1024.
+ *
+ * To save a bit of memory and to avoid alignment issues we assume 39-bit IPA
+ * for now, but remember that the level-1 table must be aligned to its size.
+ */
+#define PTRS_PER_PGD2	512
+#define PGD2_ORDER	get_order(PTRS_PER_PGD2 * sizeof(pgd_t))
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+
+#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644
index 0000000..7ce5f1c
--- /dev/null
+++ b/arch/arm/include/asm/kvm_para.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_ARM_KVM_PARA_H
+#define _ASM_ARM_KVM_PARA_H
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
+#endif /* _ASM_ARM_KVM_PARA_H */
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index bc63116..bfaa438 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -54,6 +54,18 @@
 
 #endif	/* CONFIG_THUMB2_KERNEL */
 
+#ifdef CONFIG_KVM
+#ifdef __ASSEMBLY__
+.arch_extension sec
+.arch_extension virt
+#else
+__asm__(
+".arch_extension sec\n"
+".arch_extension virt\n"
+);
+#endif
+#endif
+
 #ifndef CONFIG_ARM_ASM_UNIFIED
 
 /*
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
new file mode 100644
index 0000000..1806a6d
--- /dev/null
+++ b/arch/arm/kvm/Kconfig
@@ -0,0 +1,44 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	---help---
+	  Say Y here to get to see options for using your Linux host to run
+	  other operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and
+	  disabled.
+
+if VIRTUALIZATION
+
+config KVM
+	bool "Kernel-based Virtual Machine (KVM) support"
+	select PREEMPT_NOTIFIERS
+	select ANON_INODES
+	select KVM_ARM_HOST
+	select KVM_MMIO
+	---help---
+	  Support hosting virtualized guest machines. You will also
+	  need to select one or more of the processor modules below.
+
+	  This module provides access to the hardware capabilities through
+	  a character device node named /dev/kvm.
+
+	  If unsure, say N.
+
+config KVM_ARM_HOST
+	bool "KVM host support for ARM cpus."
+	depends on KVM
+	depends on MMU
+	depends on CPU_V7 || ARM_VIRT_EXT
+	---help---
+	  Provides host support for ARM processors.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
new file mode 100644
index 0000000..4ff905d
--- /dev/null
+++ b/arch/arm/kvm/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/arm/kvm
+AFLAGS_arm_interrupts.o := -I$(obj)
+
+kvm-arm-y += $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+kvm-arm-y += arm.o arm_guest.o arm_interrupts.o arm_mmu.o arm_emulate.o \
+		trace.o
+
+obj-$(CONFIG_KVM) += kvm-arm.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
new file mode 100644
index 0000000..2bab6aa
--- /dev/null
+++ b/arch/arm/kvm/arm.c
@@ -0,0 +1,450 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/unified.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/mman.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "../mm/mm.h"
+#include "trace.h"
+
+
+static bool kvm_arm_hardware_enabled = false;
+static void *kvm_arm_hyp_stack_page = NULL;
+extern unsigned long __kvm_hyp_vector;
+extern unsigned long __kvm_hyp_init, __kvm_hyp_init_end;
+extern struct   mm_struct init_mm;
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+	unsigned long vector_ptr, hyp_stack_ptr;
+	unsigned long init_ptr, init_end_ptr, phys_addr;
+	phys_addr_t init_phys_addr;
+	u64 pfn;
+	pgprot_t prot;
+
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (kvm_arm_hardware_enabled)
+		return 0;
+
+	/*
+	 * Allocate stack page for Hypervisor-mode
+	 */
+	kvm_arm_hyp_stack_page = (void *)__get_free_page(GFP_KERNEL);
+	if (!kvm_arm_hyp_stack_page)
+		return -ENOMEM;
+
+	init_ptr = (unsigned long)&__kvm_hyp_init;
+	init_end_ptr = (unsigned long)&__kvm_hyp_init_end;
+	init_phys_addr = virt_to_phys((void *)&__kvm_hyp_init);
+	if (init_phys_addr > (unsigned long long)~0) {
+		kvm_err(-ENOTSUPP, "Hyp init physical address must be 32-bit\n");
+		return -ENOTSUPP;
+	}
+	phys_addr = (unsigned long)init_phys_addr;
+
+	if (init_end_ptr - init_ptr > PAGE_SIZE) {
+		kvm_err(-ENOTSUPP, "KVM init code may not exceed 1 page\n");
+		return -ENOTSUPP;
+	}
+
+	pgd = pgd_offset_k(phys_addr);
+	pud = pud_alloc(&init_mm, pgd, phys_addr);
+	if (!pud)
+		return -ENOMEM;
+	pmd = pmd_alloc(&init_mm, pud, phys_addr);
+	if (!pmd)
+		return -ENOMEM;
+	pte = pte_alloc_kernel(pmd, phys_addr);
+	if (!pte)
+		return -ENOMEM;
+	BUG_ON(!pte_none(*pte));
+
+	pfn = init_phys_addr >> PAGE_SHIFT;
+	prot = __pgprot(L_PTE_USER | L_PTE_PRESENT | L_PTE_YOUNG
+						   | L_PTE_RDONLY);
+	set_pte_ext(pte, pfn_pte(pfn, prot), 0);
+
+	/*
+	 * Set the HVBAR and stack pointer
+	 */
+	BUG_ON(init_phys_addr & 0x1f);
+	asm volatile (
+		"mov	r0, %[vector_ptr]\n\t"
+		"ldr	r7, =SMCHYP_HVBAR_W\n\t"
+		"smc	#0\n\t" :
+		: [vector_ptr] "r" ((unsigned long)init_phys_addr)
+		: "r0", "r7", "r12");
+
+	/*
+	 * Call initialization code
+	 */
+	vector_ptr = (unsigned long)&__kvm_hyp_vector;
+	hyp_stack_ptr = (unsigned long)kvm_arm_hyp_stack_page + PAGE_SIZE;
+	kvm_msg("very good, now we call init code - yikes!");
+	asm volatile (
+		"mov	r0, %[stack_ptr]\n\t"
+		"mov	r1, %[vector_ptr]\n\t"
+		"hvc	#0\n\t" :
+		: [stack_ptr] "r" (hyp_stack_ptr),
+		  [vector_ptr] "r" (vector_ptr)
+		: "r0", "r1", "r12");
+
+	/*
+	 * Unmap the identity mapping
+	 */
+	pmd_clear(pmd_off_k(phys_addr));
+
+	kvm_arm_hardware_enabled = true;
+	return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+	/* There is no need for this now, so we just ignore that */
+}
+
+int kvm_arch_hardware_setup(void)
+{
+	return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+	*(int *)rtn = 0;
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+int kvm_arch_init_vm(struct kvm *kvm)
+{
+	return kvm_alloc_stage2_pgd(kvm);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+	int i;
+
+	kvm_free_stage2_pgd(kvm);
+
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm_arch_vcpu_free(kvm->vcpus[i]);
+			kvm->vcpus[i] = NULL;
+		}
+	}
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+	int r;
+	switch (ext) {
+	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+		r = 1;
+		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
+	default:
+		r = 0;
+		break;
+	}
+	return r;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+			unsigned int ioctl, unsigned long arg)
+{
+	int ret = 0;
+
+	switch (ioctl) {
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret < 0)
+		printk(KERN_ERR "error processing ARM ioctl: %d", ret);
+	return ret;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+			       struct kvm_userspace_memory_region *mem,
+			       struct kvm_memory_slot old,
+			       int user_alloc)
+{
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_memory_slot old,
+				   struct kvm_userspace_memory_region *mem,
+				   int user_alloc)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				   struct kvm_userspace_memory_region *mem,
+				   struct kvm_memory_slot old,
+				   int user_alloc)
+{
+	KVMARM_NOT_IMPLEMENTED();
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+	// XXX What should this do?
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return ERR_PTR(-EINVAL);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	KVMARM_NOT_IMPLEMENTED();
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                        struct kvm_guest_debug *dbg)
+{
+	return -EINVAL;
+}
+
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
+{
+	return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
+{
+	return -EINVAL;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+	return (!v->arch.wait_for_interrupts);
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	KVMARM_NOT_IMPLEMENTED();
+	return -EINVAL;
+}
+
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+				    struct kvm_interrupt *intr)
+{
+	u32 mask;
+
+	switch (intr->irq) {
+	case EXCEPTION_IRQ:
+		/* IRQ */
+		mask = EXCEPTION_IRQ;
+		break;
+	case EXCEPTION_FIQ:
+		/* FIQ */
+		mask = EXCEPTION_FIQ;
+		break;
+	default:
+		/* Only async exceptions are supported here */
+		return -EINVAL;
+	}
+
+	if (intr->raise) {
+		if (mask == EXCEPTION_IRQ)
+			kvm_trace_activity(101, "raise IRQ");
+		else if (mask == EXCEPTION_FIQ)
+			kvm_trace_activity(102, "raise FIQ");
+		vcpu->arch.exception_pending |= mask;
+		vcpu->arch.wait_for_interrupts = 0;
+	} else {
+		if (mask == EXCEPTION_IRQ)
+			kvm_trace_activity(103, "lower IRQ");
+		else if (mask == EXCEPTION_FIQ)
+			kvm_trace_activity(104, "lower FIQ");
+
+		vcpu->arch.exception_pending &= ~mask;
+	}
+
+	return 0;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	int r;
+
+	switch (ioctl) {
+	case KVM_S390_STORE_STATUS: {
+		return -EINVAL;
+	}
+	case KVM_INTERRUPT: {
+		struct kvm_interrupt intr;
+
+		r = -EFAULT;
+		if (copy_from_user(&intr, argp, sizeof intr))
+			break;
+		r = kvm_vcpu_ioctl_interrupt(vcpu, &intr);
+		break;
+	}
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return -ENOTSUPP;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+		       unsigned int ioctl, unsigned long arg)
+{
+	printk(KERN_ERR "kvm_arch_vm_ioctl: Unsupported ioctl (%d)\n", ioctl);
+	return -EINVAL;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+static int k_show(struct seq_file *m, void *v)
+{
+	print_kvm_debug_info(&seq_printf, m);
+	return 0;
+}
+
+static void *k_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *k_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+
+static void k_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations kvmproc_op = {
+	.start	= k_start,
+	.next	= k_next,
+	.stop	= k_stop,
+	.show	= k_show
+};
+
+static int kvm_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &kvmproc_op);
+}
+
+static const struct file_operations proc_kvm_operations = {
+	.open		= kvm_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int arm_init(void)
+{
+	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (rc == 0)
+		proc_create("kvm", 0, NULL, &proc_kvm_operations);
+	return rc;
+}
+
+static void __exit arm_exit(void)
+{
+	kvm_exit();
+}
+
+module_init(arm_init);
+module_exit(arm_exit);
diff --git a/arch/arm/kvm/arm_emulate.c b/arch/arm/kvm/arm_emulate.c
new file mode 100644
index 0000000..a7b880e
--- /dev/null
+++ b/arch/arm/kvm/arm_emulate.c
@@ -0,0 +1,72 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This module enables machines with Intel VT-x extensions to run virtual
+ * machines without emulation or binary translation.
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi at qumranet.com>
+ *   Yaniv Kamay  <yaniv at qumranet.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <asm/kvm_emulate.h>
+
+/*
+ * Return a pointer to the register number valid in the specified mode of
+ * the virtual CPU.
+ */
+u32* kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode)
+{
+	struct kvm_vcpu_regs *regs;
+	u8 reg_idx;
+	BUG_ON(reg_num > 15);
+
+	regs = &vcpu->arch.regs;
+
+	/* The PC is trivial */
+	if (reg_num == 15)
+		return &(regs->r15);
+
+	/* Non-banked registers */
+	if (reg_num < 8)
+		return &(regs->shared_reg[reg_num]);
+
+	/* Banked registers r13 and r14 */
+	if (reg_num >= 13) {
+		reg_idx = reg_num - 13; /* 0=r13 and 1=r14 */
+		switch (mode) {
+		case MODE_FIQ:
+			return &(regs->banked_fiq[reg_idx]);
+		case MODE_IRQ:
+			return &(regs->banked_irq[reg_idx]);
+		case MODE_SVC:
+			return &(regs->banked_svc[reg_idx]);
+		case MODE_ABORT:
+			return &(regs->banked_abt[reg_idx]);
+		case MODE_UNDEF:
+			return &(regs->banked_und[reg_idx]);
+		case MODE_USER:
+		case MODE_SYSTEM:
+			return &(regs->banked_usr[reg_idx]);
+		}
+	}
+
+	/* Banked FIQ registers r8-r12 */
+	if (reg_num >= 8 && reg_num <= 12) {
+		reg_idx = reg_num - 8; /* 0=r8, ..., 4=r12 */
+		if (mode == MODE_FIQ)
+			return &(regs->fiq_reg[reg_idx]);
+		else
+			return &(regs->usr_reg[reg_idx]);
+	}
+
+	BUG();
+	return NULL;
+}
diff --git a/arch/arm/kvm/arm_guest.c b/arch/arm/kvm/arm_guest.c
new file mode 100644
index 0000000..555735f
--- /dev/null
+++ b/arch/arm/kvm/arm_guest.c
@@ -0,0 +1,130 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+
+
+/* Accessors used by the generic KVM statistics/debugfs machinery. */
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+/* No ARM-specific statistics are exported through debugfs yet. */
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+};
+
+/*
+ * Architecture-specific VCPU setup hook.  Nothing to do on ARM yet;
+ * always succeeds.
+ */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+/*
+ * KVM_GET_REGS: copy the guest's core register state (shared and banked
+ * GPRs, PC, CPSR, banked SPSRs) and a few cp15 registers out of the VCPU
+ * into the userspace-visible kvm_regs layout.  Always returns 0.
+ */
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	struct kvm_vcpu_regs *vcpu_regs = &vcpu->arch.regs;
+
+	/*
+	 * GPRs and PSRs
+	 */
+	memcpy(regs->regs0_7, &(vcpu_regs->shared_reg[0]), sizeof(u32) * 8);
+	memcpy(regs->usr_regs8_12, &(vcpu_regs->usr_reg[0]), sizeof(u32) * 5);
+	memcpy(regs->fiq_regs8_12, &(vcpu_regs->fiq_reg[0]), sizeof(u32) * 5);
+	/* Banked r13/r14 pairs, indexed by the MODE_* constants. */
+	regs->reg13[MODE_FIQ]   = vcpu_regs->banked_fiq[0];
+	regs->reg14[MODE_FIQ]   = vcpu_regs->banked_fiq[1];
+	regs->reg13[MODE_IRQ]   = vcpu_regs->banked_irq[0];
+	regs->reg14[MODE_IRQ]   = vcpu_regs->banked_irq[1];
+	regs->reg13[MODE_SVC]   = vcpu_regs->banked_svc[0];
+	regs->reg14[MODE_SVC]   = vcpu_regs->banked_svc[1];
+	regs->reg13[MODE_ABORT] = vcpu_regs->banked_abt[0];
+	regs->reg14[MODE_ABORT] = vcpu_regs->banked_abt[1];
+	regs->reg13[MODE_UNDEF] = vcpu_regs->banked_und[0];
+	regs->reg14[MODE_UNDEF] = vcpu_regs->banked_und[1];
+	regs->reg13[MODE_USER]  = vcpu_regs->banked_usr[0];
+	regs->reg14[MODE_USER]  = vcpu_regs->banked_usr[1];
+	regs->reg15 = vcpu_reg(vcpu, 15);
+	regs->cpsr = vcpu_regs->cpsr;
+	/* NOTE(review): assumes 5 banked SPSRs in spsr[] -- confirm layout. */
+	memcpy(regs->spsr, vcpu_regs->spsr, sizeof(u32) * 5);
+
+	/*
+	 * Co-processor registers.
+	 */
+	regs->cp15.c0_cpuid = vcpu->arch.cp15.c0_MIDR;
+	regs->cp15.c2_base0 = vcpu->arch.cp15.c2_TTBR0;
+	regs->cp15.c2_base1 = vcpu->arch.cp15.c2_TTBR1;
+	regs->cp15.c3 = vcpu->arch.cp15.c3_DACR;
+
+	return 0;
+}
+
+/*
+ * KVM_SET_REGS: load the guest's core register state (shared and banked
+ * GPRs, PC, CPSR, banked SPSRs) from the userspace kvm_regs layout into
+ * the VCPU.  The CPSR is written through kvm_cpsr_write rather than a
+ * plain store.  Always returns 0.
+ */
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	struct kvm_vcpu_regs *vcpu_regs = &vcpu->arch.regs;
+
+	memcpy(&(vcpu_regs->shared_reg[0]), regs->regs0_7, sizeof(u32) * 8);
+	memcpy(&(vcpu_regs->usr_reg[0]), regs->usr_regs8_12, sizeof(u32) * 5);
+	memcpy(&(vcpu_regs->fiq_reg[0]), regs->fiq_regs8_12, sizeof(u32) * 5);
+	/* Banked r13/r14 pairs, indexed by the MODE_* constants. */
+	vcpu_regs->banked_fiq[0] = regs->reg13[MODE_FIQ];
+	vcpu_regs->banked_fiq[1] = regs->reg14[MODE_FIQ];
+	vcpu_regs->banked_irq[0] = regs->reg13[MODE_IRQ];
+	vcpu_regs->banked_irq[1] = regs->reg14[MODE_IRQ];
+	vcpu_regs->banked_svc[0] = regs->reg13[MODE_SVC];
+	vcpu_regs->banked_svc[1] = regs->reg14[MODE_SVC];
+	vcpu_regs->banked_abt[0] = regs->reg13[MODE_ABORT];
+	vcpu_regs->banked_abt[1] = regs->reg14[MODE_ABORT];
+	vcpu_regs->banked_und[0] = regs->reg13[MODE_UNDEF];
+	vcpu_regs->banked_und[1] = regs->reg14[MODE_UNDEF];
+	vcpu_regs->banked_usr[0] = regs->reg13[MODE_USER];
+	vcpu_regs->banked_usr[1] = regs->reg14[MODE_USER];
+
+	vcpu_reg(vcpu, 15) = regs->reg15;
+	kvm_cpsr_write(vcpu, regs->cpsr);
+	memcpy(vcpu_regs->spsr, regs->spsr, sizeof(u32) * 5);
+
+	return 0;
+}
+
+/* KVM_GET_SREGS is not implemented on ARM yet. */
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOTSUPP;
+}
+
+/* KVM_SET_SREGS is not implemented on ARM yet. */
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOTSUPP;
+}
+
+/* KVM_GET_FPU is not implemented on ARM yet. */
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOTSUPP;
+}
+
+/* KVM_SET_FPU is not implemented on ARM yet. */
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOTSUPP;
+}
+
+/*
+ * KVM_TRANSLATE: translate a guest virtual address for userspace.
+ * Not implemented on ARM yet; returns success without filling in the
+ * translation result.
+ */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
+{
+	return 0;
+}
diff --git a/arch/arm/kvm/arm_interrupts.S b/arch/arm/kvm/arm_interrupts.S
new file mode 100644
index 0000000..ea16794
--- /dev/null
+++ b/arch/arm/kvm/arm_interrupts.S
@@ -0,0 +1,206 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+#include <asm/unified.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@  Hypervisor initialization
+@    - should be called with:
+@        r0 = top of Hyp stack (VA)
+@        r1 = virtual HVBAR address
+@    - caller must preserve r0, r1, r12
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+	.text
+	.align 5
+__kvm_hyp_init:
+	.globl __kvm_hyp_init
+
+	@ Hyp-mode exception vector used only during init.  Only the HVC
+	@ slot (the sixth entry) is live; all other slots are unused here.
+	nop
+	nop
+	nop
+	nop
+	nop
+	b	__do_hyp_init
+	nop
+	nop
+
+__do_hyp_init:
+	@ Copy the Hyp stack pointer
+	mov	sp, r0
+	mov	lr, r1
+
+	@ Set the HTTBR to be the same as the TTBR1 holding the kernel
+	@ level-1 page table
+	mrrc	p15, 1, r0, r1, c2
+	mcrr	p15, 4, r0, r1, c2
+
+	@ Set the HTCR to the same shareability and cacheability settings as the
+	@ non-secure TTBCR and with T0SZ == 0.
+	mrc	p15, 4, r0, c2, c0, 2
+	ldr	r12, =HTCR_MASK
+	bic	r0, r0, r12
+	mrc	p15, 0, r1, c2, c0, 2
+	and	r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
+	orr	r0, r0, r1
+	mcr	p15, 4, r0, c2, c0, 2
+
+	@ Use the same memory attributes for hyp. accesses as the kernel
+	@ (copy MAIRx to HMAIRx).
+	mrc	p15, 0, r0, c10, c2, 0
+	mcr	p15, 4, r0, c10, c2, 0
+	mrc	p15, 0, r0, c10, c2, 1
+	mcr	p15, 4, r0, c10, c2, 1
+
+	@ Set the HSCTLR to:
+	@  - ARM/THUMB exceptions: Kernel config
+	@  - Endianness: Kernel config
+	@  - Fast Interrupt Features: Kernel config
+	@  - Write permission implies XN: disabled
+	@  - Instruction cache: enabled
+	@  - Data/Unified cache: enabled
+	@  - Memory alignment checks: enabled
+	@  - MMU: enabled (this code must be run from an identity mapping)
+	mrc	p15, 4, r0, c1, c0, 0
+	ldr	r12, =HSCTLR_MASK
+	bic	r0, r0, r12
+	@ Inherit TE/EE/FI from the kernel's SCTLR, force M/A/I on.
+	mrc	p15, 0, r1, c1, c0, 0
+	ldr	r12, =(HSCTLR_TE | HSCTLR_EE | HSCTLR_FI)
+	and	r1, r1, r12
+	ldr	r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_I)
+	orr	r1, r1, r12
+	orr	r0, r0, r1
+	mcr	p15, 4, r0, c1, c0, 0
+
+	@ Set the HVBAR to the virtual address
+	mcr	p15, 4, lr, c12, c0, 0
+
+	@ Return to the kernel
+	eret
+
+	.ltorg
+	.align
+
+__kvm_hyp_init_end:
+	.globl __kvm_hyp_init_end
+	.word 0
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@  Hypervisor exception vector and handlers
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ Common handler prologue: save the scratch registers and decode why we
+@ entered Hyp mode.  Falls through when the exception is an HVC issued
+@ from VMID 0 (i.e. the host); otherwise branches to the local label 1:
+@ which each user of the macro must define.
+.macro	hyp_entry
+	push	{r0, r1, r2}
+	mrc	p15, 4, r0, c5, c2, 0	@HSR
+	lsr	r1, r0, #26		@ exception class = HSR[31:26]
+	cmp	r1, #0x12		@HVC
+	bne	1f
+
+	@ Let's check if the HVC came from VMID 0 and allow simple
+	@ switch to Hyp mode
+	mrrc    p15, 6, r1, r2, c2	@ VTTBR (64-bit; VMID in top word)
+	lsr     r2, r2, #16
+	and     r2, r2, #0xff
+	cmp     r2, #0
+	bne	1f			@ VMID != 0
+.endm
+
+@ Return from a Hyp-mode exception to the mode we were called from.
+.macro	hyp_return
+	eret
+.endm
+
+	.text
+	.align 5
+__kvm_hyp_vector:
+	.globl __kvm_hyp_vector
+
+	@ Hyp-mode exception vector
+	b	hyp_reset
+	b	hyp_undef
+	b	hyp_svc
+	b	hyp_pabt
+	b	hyp_dabt
+	b	hyp_hvc
+	b	hyp_irq
+	b	hyp_fiq
+
+hyp_reset:
+	hyp_return
+hyp_undef:
+	hyp_return
+hyp_svc:
+	@ The assumption here is that svc's are never called from within
+	@ the kernel and KVM code running in Hyp mode is very limited, so
+	@ this vector is only used when wanting to return from Hyp mode
+	@ to standard svc mode.
+
+	hyp_entry
+
+	@ If we get here, the entry to hyp mode was a HVC from VMID0.
+	@ That means that the host KVM kernel had previously switched
+	@ to run in Hyp mode, and now wants to switch back. See below.
+	@ The caller should preserve the lr.
+	pop	{r0, r1, r2}
+
+	@ Swap back the stack pointers that hyp_hvc exchanged below:
+	@ the Hyp sp was stashed in sp_svc while running in Hyp mode.
+	mov	lr, sp
+	mrs	sp, sp_svc	@ Restore hyp_sp
+	msr	sp_svc, lr
+
+	pop	{lr}		@ Restore original SVC cpsr
+	msr	spsr_hyp, lr
+	eret
+
+	@ Not HVC from VMID 0 - this requires more careful investigation
+1:	pop	{r0, r1, r2}
+	hyp_return
+hyp_pabt:
+	hyp_return
+hyp_dabt:
+	hyp_return
+hyp_hvc:
+	@ Preserve the caller's SVC cpsr across the mode switch; it is
+	@ restored by the hyp_svc path above on the way back.
+	mrs	lr, spsr_hyp
+	push	{lr}		@ Preserve SVC cpsr
+	hyp_entry
+
+	@ If we get here, the entry to hyp mode was a HVC from VMID0.
+	@ That means that the host KVM kernel wants simply to switch
+	@ to run in Hyp mode. Caller should preserve lr and we backup
+	@ the HYP sp in the SVC sp and use the SVC sp as the current
+	@ sp.
+	pop	{r0, r1, r2}
+
+	mrs	lr, sp_svc
+	msr	sp_svc, sp
+	mov	sp, lr
+
+	@ Continue execution in Hyp mode at the instruction after the HVC.
+	mrs	lr, ELR_hyp
+	mov	pc, lr
+
+	@ Not HVC from VMID 0 - this requires more careful investigation
+1:	pop	{r0, r1, r2}
+	pop	{lr}
+	hyp_return
+hyp_irq:
+	hyp_return
+hyp_fiq:
+	hyp_return
+
+
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@  Hypervisor world-switch code
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
diff --git a/arch/arm/kvm/arm_mmu.c b/arch/arm/kvm/arm_mmu.c
new file mode 100644
index 0000000..28e2950
--- /dev/null
+++ b/arch/arm/kvm/arm_mmu.c
@@ -0,0 +1,60 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "trace.h"
+
+/*
+ * Allocate and zero the level-1 translation table used for stage-2
+ * (guest physical to host physical) translation.
+ *
+ * Returns 0 on success, -EINVAL if a table is already installed for
+ * this VM, or -ENOMEM if the page allocation fails.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	pgd_t *stage2_pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err(-EINVAL, "kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	stage2_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD2_ORDER);
+	if (stage2_pgd == NULL)
+		return -ENOMEM;
+
+	memset(stage2_pgd, 0, PTRS_PER_PGD2 * sizeof(pgd_t));
+	kvm->arch.pgd = stage2_pgd;
+
+	return 0;
+}
+
+/*
+ * Free level-1 translation table for stage-2 translation and all belonging
+ * level-2 and level-3 tables.
+ *
+ * NOTE(review): the child level-2/3 tables are not freed yet (see the
+ * KVMARM_NOT_IMPLEMENTED below), so their pages currently leak.  They are
+ * only reachable through the level-1 table, which is freed first here --
+ * once implemented, the child walk must happen before free_pages.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+	if (kvm->arch.pgd == NULL)
+		return;
+
+	free_pages((unsigned long)kvm->arch.pgd, PGD2_ORDER);
+	kvm->arch.pgd = NULL;
+
+	/* TODO: Free child tables */
+	KVMARM_NOT_IMPLEMENTED();
+}
diff --git a/arch/arm/kvm/trace.c b/arch/arm/kvm/trace.c
new file mode 100644
index 0000000..c1ce221
--- /dev/null
+++ b/arch/arm/kvm/trace.c
@@ -0,0 +1,433 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+#include "trace.h"
+
+
+/******************************************************************************
+ * Simple event counting
+ */
+
+/* One monotonically increasing event counter with a human-readable label. */
+struct kvm_event {
+	unsigned long long cnt;
+	char *descr;
+};
+
+/*
+ * Counter table indexed by the EVENT_* constants from trace.h; the entry
+ * order here must match those index definitions exactly.
+ */
+static struct kvm_event kvm_eventc_log[KVM_EVENTC_ITEMS] =
+{
+	{ 0, "switch to guest" },
+	{ 0, "exit from guest" },
+	{ 0, "Block VCPU" },
+	{ 0, "Exit to QEMU for IRQ window" },
+	{ 0, "Switch VCPU mode" },
+	{ 0, "VCPU IRQs on" },
+	{ 0, "VCPU IRQs off" },
+	{ 0, "Wait-for-interrupts" },
+	{ 0, "Flush shadow page table" },
+	{ 0, "Virtual TTBR change" },
+	{ 0, "Read guest page table entry" },
+	{ 0, "Map GVA to GFN" },
+	{ 0, "Virtual DACR change" },
+	{ 0, "VCPU switch to privileged mode" },
+	{ 0, "VCPU switch from privileged mode" },
+	{ 0, "VCPU process ID registers change" },
+	{ 0, "Emulate Load/Store with translation" },
+	{ 0, "Emulate MRS" },
+	{ 0, "Emulate MSR" },
+	{ 0, "Emulate CPS" },
+	{ 0, "Need reschedule in execution loop" },
+	{ 0, "MCR 7,  5, 0 - Invalidate entire I-cache" },
+	{ 0, "MCR 7,  5, 1 - Invalidate line in I-cache MVA" },
+	{ 0, "MCR 7,  5, 2 - Invalidate line in I-cache set/way" },
+	{ 0, "MCR 7,  5, 7 - Flush branch target cache - MVA" },
+	{ 0, "MCR 7,  6, 0 - Invalidate entire data cache" },
+	{ 0, "MCR 7,  6, 1 - Invalidate data cache line - MVA" },
+	{ 0, "MCR 7,  6, 2 - Invalidate data cache line - set/way" },
+	{ 0, "MCR 7,  7, 0 - Invalidate D- and I-cache" },
+	{ 0, "MCR 7, 10, 0 - Clean entire data cache" },
+	{ 0, "MCR 7, 10, 1 - Clean data cache line - MVA" },
+	{ 0, "MCR 7, 10, 4 - Data Synchronization Barrier (DSB)" },
+	{ 0, "MCR 7, 14, 0 - Clean and invalidate entire D-cache" },
+	{ 0, "MCR 7, 14, 1 - Clean and invalidate D-cache line - MVA" },
+	{ 0, "MCR 7, 15, 0 - Clean and invalidate unified cache" },
+	{ 0, "MCR 8,  5, 0 - Invalidate instruction TLB" },
+	{ 0, "MCR 8,  6, 0 - Invalidate data TLB" },
+	{ 0, "MCR 8,  7, 0 - Invalidate unified TLB" },
+	{ 0, "Emulate Load-Store multiple" },
+};
+
+/*
+ * Bump the counter for @event.  Indices outside the table are silently
+ * ignored.
+ */
+void kvm_arm_count_event(unsigned int event)
+{
+	if (event < KVM_EVENTC_ITEMS)
+		kvm_eventc_log[event].cnt++;
+}
+
+/* Reset every event counter to zero; the descriptions are untouched. */
+void kvm_arm_init_eventc(void)
+{
+	unsigned int idx = 0;
+
+	while (idx < KVM_EVENTC_ITEMS) {
+		kvm_eventc_log[idx].cnt = 0;
+		idx++;
+	}
+}
+
+/* Intrusive list node used to present the counters in sorted order. */
+struct kvm_event_order {
+	struct kvm_event *event;
+	struct kvm_event_order *next;
+	struct kvm_event_order *prev;
+};
+/* Node storage, one per counter; relinked from scratch on every sort. */
+static struct kvm_event_order event_order[KVM_EVENTC_ITEMS];
+
+/*
+ * Sort the event counters into descending order by count using an
+ * insertion sort over the static event_order nodes, built around a
+ * stack-local sentinel.
+ *
+ * Returns the head of a NULL-terminated chain (callers follow ->next
+ * only), or NULL if there are no items.  The first node's ->prev is
+ * cleared so no node retains a pointer into this function's dead stack
+ * frame after return.
+ */
+static struct kvm_event_order *sort_kvm_event_log(void)
+{
+	unsigned int i;
+	struct kvm_event_order *ptr;
+	struct kvm_event_order *first;
+	struct kvm_event_order head =
+		{ .event = NULL, .next = &head, .prev = &head };
+
+	for (i = 0; i < KVM_EVENTC_ITEMS; i++) {
+		event_order[i].event = &kvm_eventc_log[i];
+		/* Find the first node whose count is <= the new one. */
+		ptr = head.next;
+		while (ptr->event != NULL &&
+		       ptr->event->cnt > kvm_eventc_log[i].cnt) {
+			ptr = ptr->next;
+		}
+		/* Insert the new node just before ptr. */
+		ptr->prev->next = &event_order[i];
+		event_order[i].prev = ptr->prev;
+		event_order[i].next = ptr;
+		ptr->prev = &event_order[i];
+	}
+
+	first = head.next;
+	head.prev->next = NULL; /* Mark end of linked list */
+	if (first == &head)
+		first = NULL;	/* No items at all */
+	else
+		first->prev = NULL; /* Drop the pointer to the stack sentinel */
+	return first;
+}
+
+/******************************************************************************
+ * Trace ring-buffer local to KVM/ARM
+ */
+
+/* Comment out the define below to compile the activity tracing away. */
+#define KVM_TRACE_ACTIVITY
+#ifndef KVM_TRACE_ACTIVITY
+/* Tracing disabled: keep the symbol but do nothing. */
+void kvm_trace_activity(unsigned int activity, char *fmt, ...)
+{
+}
+#else
+
+#define ACTIVITY_TRACE_ITEMS 50
+#define TRACE_DESCR_LEN 80
+/* Circular buffer of recent activity ids, repeat counts and messages. */
+static u32 activity_trace[ACTIVITY_TRACE_ITEMS];
+static u32 activity_trace_cnt[ACTIVITY_TRACE_ITEMS];
+static char activity_trace_descr[ACTIVITY_TRACE_ITEMS][TRACE_DESCR_LEN];
+static int activity_trace_index = 0;
+static bool trace_init = false;
+
+/*
+ * Record an activity in the circular trace buffer.  A repeat of the
+ * current slot's activity id only bumps that slot's counter; a new id
+ * advances the ring and formats a fresh description from fmt/args.
+ *
+ * NOTE(review): no locking and a non-atomic lazy init -- presumably only
+ * called from one context; confirm before using concurrently.  The
+ * counter starts at 0 for a new entry, so it counts repeats, not total
+ * occurrences.
+ */
+void kvm_trace_activity(unsigned int activity, char *fmt, ...)
+{
+	va_list ap;
+	unsigned int i;
+	char *ptr;
+
+	/* Lazily clear the description strings on first use. */
+	if (!trace_init) {
+		for (i = 0; i < ACTIVITY_TRACE_ITEMS; i++)
+			activity_trace_descr[i][0] = '\0';
+		trace_init = true;
+	}
+
+	if (activity_trace[activity_trace_index] == activity) {
+		activity_trace_cnt[activity_trace_index]++;
+	} else {
+		activity_trace_index = (activity_trace_index + 1)
+			% ACTIVITY_TRACE_ITEMS;
+		activity_trace[activity_trace_index] = activity;
+		activity_trace_cnt[activity_trace_index] = 0;
+
+		ptr = activity_trace_descr[activity_trace_index];
+		va_start(ap, fmt);
+		vsnprintf(ptr, TRACE_DESCR_LEN, fmt, ap);
+		va_end(ap);
+	}
+}
+#endif
+
+/******************************************************************************
+ * World-switch ring-buffer
+ */
+
+#define WS_TRACE_ITEMS 10
+/* Circular buffers of the last guest PCs seen at world-switch entry/exit. */
+static u32 ws_trace_enter[WS_TRACE_ITEMS];
+static int ws_trace_enter_index = 0;	/* next slot to write */
+static u32 ws_trace_exit[WS_TRACE_ITEMS];
+static int ws_trace_exit_index = 0;	/* next slot to write */
+static u32 ws_trace_exit_codes[WS_TRACE_ITEMS];
+DEFINE_MUTEX(ws_trace_mutex);		/* guards all of the above */
+
+/* Record @guest_pc in the world-switch entry ring buffer. */
+void trace_ws_enter(u32 guest_pc)
+{
+	mutex_lock(&ws_trace_mutex);
+	ws_trace_enter[ws_trace_enter_index] = guest_pc;
+	ws_trace_enter_index = (ws_trace_enter_index + 1) % WS_TRACE_ITEMS;
+	mutex_unlock(&ws_trace_mutex);
+}
+
+/* Record @guest_pc and @exit_code in the world-switch exit ring buffer. */
+void trace_ws_exit(u32 guest_pc, u32 exit_code)
+{
+	mutex_lock(&ws_trace_mutex);
+	ws_trace_exit[ws_trace_exit_index] = guest_pc;
+	ws_trace_exit_codes[ws_trace_exit_index] = exit_code;
+	ws_trace_exit_index = (ws_trace_exit_index + 1) % WS_TRACE_ITEMS;
+	mutex_unlock(&ws_trace_mutex);
+}
+
+/*
+ * Dump the world-switch ring buffers (newest first) to the kernel log.
+ * Bails out if the enter and exit rings are out of sync.
+ *
+ * NOTE(review): the walk terminates when it wraps back to the current
+ * index, so the slot at ws_trace_enter_index itself (the oldest entry)
+ * is never printed -- confirm whether that is intended.  Also, the
+ * exit codes are u32 printed with %d.
+ */
+void print_ws_trace(void)
+{
+	int i;
+	mutex_lock(&ws_trace_mutex);
+
+	if (ws_trace_enter_index != ws_trace_exit_index) {
+		kvm_msg("enter and exit WS trace count differ");
+		mutex_unlock(&ws_trace_mutex);
+		return;
+	}
+
+	/* Avoid potential endless loop */
+	if (ws_trace_enter_index < 0 || ws_trace_enter_index >= WS_TRACE_ITEMS) {
+		kvm_msg("ws_trace_enter_index out of bounds: %d",
+				ws_trace_enter_index);
+		mutex_unlock(&ws_trace_mutex);
+		return;
+	}
+
+	/* Walk backwards from the most recent entry, wrapping below 0. */
+	for (i = ws_trace_enter_index - 1; i != ws_trace_enter_index; i--) {
+		if (i < 0) {
+			/* The loop's i-- brings this back to ITEMS - 1. */
+			i = WS_TRACE_ITEMS;
+			continue;
+		}
+
+		printk(KERN_ERR "Enter: %08x    Exit: %08x (%d)\n",
+			ws_trace_enter[i],
+			ws_trace_exit[i],
+			ws_trace_exit_codes[i]);
+	}
+	mutex_unlock(&ws_trace_mutex);
+}
+
+/******************************************************************************
+ * Dump total debug info, or write to /proc/kvm
+ */
+
+/* VCPU whose state is dumped below; NULL until one is registered
+ * (set elsewhere -- not visible in this file chunk). */
+struct kvm_vcpu *latest_vcpu = NULL;
+
+/*
+ * Emit the full KVM/ARM debug dump -- VCPU register state, the
+ * world-switch history, the activity trace and the sorted event
+ * counters -- through @print_fn (either a seq_file printer or the
+ * printk relay; @m may be NULL for the latter).
+ */
+void print_kvm_debug_info(int (*print_fn)(print_fn_args), struct seq_file *m)
+{
+	int i;
+	struct kvm_vcpu_regs *regs;
+	char *mode = NULL;
+	char *exceptions[7];
+	struct kvm_vcpu *vcpu = latest_vcpu;
+	struct kvm_event_order *ptr;
+
+	print_fn(m, "KVM/ARM runtime info\n");
+	print_fn(m, "======================================================");
+	print_fn(m, "\n\n");
+
+	if (vcpu == NULL) {
+		print_fn(m, "No registered VCPU\n");
+		goto print_ws_hist;
+	}
+
+
+	/* NOTE(review): mode stays NULL if VCPU_MODE returns a value not
+	 * listed here, and is later printed with %s -- confirm the range. */
+	switch (VCPU_MODE(vcpu)) {
+		case MODE_USER:   mode = "USR"; break;
+		case MODE_FIQ:    mode = "FIQ"; break;
+		case MODE_IRQ:    mode = "IRQ"; break;
+		case MODE_SVC:    mode = "SVC"; break;
+		case MODE_ABORT:  mode = "ABT"; break;
+		case MODE_UNDEF:  mode = "UND"; break;
+		case MODE_SYSTEM: mode = "SYS"; break;
+	}
+
+	vcpu_load(vcpu);
+	regs = &vcpu->arch.regs;
+
+	print_fn(m, "Virtual CPU state:\n\n");
+	print_fn(m, "PC is at: \t%08x\n", vcpu_reg(vcpu, 15));
+	print_fn(m, "CPSR:     \t%08x\n(Mode: %s)  (IRQs: %s)  (FIQs: %s) "
+		      "  (Vec: %s)\n",
+		      regs->cpsr, mode,
+		      (regs->cpsr & PSR_I_BIT) ? "off" : "on",
+		      (regs->cpsr & PSR_F_BIT) ? "off" : "on",
+		      (regs->cpsr & PSR_V_BIT) ? "high" : "low");
+
+	/* r0-r12 as seen from user mode, four per output line. */
+	for (i = 0; i <= 12; i++) {
+		if ((i % 4) == 0)
+			print_fn(m, "\nregs[%u]: ", i);
+
+		print_fn(m, "\t0x%08x", vcpu_reg_m(vcpu, i, MODE_USER));
+	}
+
+	print_fn(m, "\n\n");
+	print_fn(m, "Banked registers:  \tr13\t\tr14\t\tspsr\n");
+	print_fn(m, "-------------------\t--------\t--------\t--------\n");
+	print_fn(m, "             USR:  \t%08x\t%08x\t////////\n",
+			vcpu_reg_m(vcpu, 13, MODE_USER),
+			vcpu_reg_m(vcpu, 14, MODE_USER));
+	print_fn(m, "             SVC:  \t%08x\t%08x\t%08x\n",
+			vcpu_reg_m(vcpu, 13, MODE_SVC),
+			vcpu_reg_m(vcpu, 14, MODE_SVC),
+			vcpu_spsr_m(vcpu, MODE_SVC));
+	print_fn(m, "             ABT:  \t%08x\t%08x\t%08x\n",
+			vcpu_reg_m(vcpu, 13, MODE_ABORT),
+			vcpu_reg_m(vcpu, 14, MODE_ABORT),
+			vcpu_spsr_m(vcpu, MODE_ABORT));
+	print_fn(m, "             UND:  \t%08x\t%08x\t%08x\n",
+			vcpu_reg_m(vcpu, 13, MODE_UNDEF),
+			vcpu_reg_m(vcpu, 14, MODE_UNDEF),
+			vcpu_spsr_m(vcpu, MODE_UNDEF));
+	print_fn(m, "             IRQ:  \t%08x\t%08x\t%08x\n",
+			vcpu_reg_m(vcpu, 13, MODE_IRQ),
+			vcpu_reg_m(vcpu, 14, MODE_IRQ),
+			vcpu_spsr_m(vcpu, MODE_IRQ));
+	print_fn(m, "             FIQ:  \t%08x\t%08x\t%08x\n",
+			vcpu_reg_m(vcpu, 13, MODE_FIQ),
+			vcpu_reg_m(vcpu, 14, MODE_FIQ),
+			vcpu_spsr_m(vcpu, MODE_FIQ));
+
+	print_fn(m, "\n");
+	print_fn(m, "fiq regs:\t%08x\t%08x\t%08x\t%08x\n"
+			  "         \t%08x\n",
+			regs->fiq_reg[0], regs->fiq_reg[1], regs->fiq_reg[2],
+			regs->fiq_reg[3], regs->fiq_reg[4]);
+
+print_ws_hist:
+	/*
+	 * Print world-switch trace circular buffer
+	 */
+	print_fn(m, "\n\nWorld switch history:\n");
+	print_fn(m, "---------------------\n");
+	mutex_lock(&ws_trace_mutex);
+
+	if (ws_trace_enter_index != ws_trace_exit_index ||
+			ws_trace_enter_index < 0 ||
+			ws_trace_enter_index >= WS_TRACE_ITEMS)
+	{
+		mutex_unlock(&ws_trace_mutex);
+		goto print_trace_activity;
+	}
+
+	exceptions[0] = "reset";
+	exceptions[1] = "undefined";
+	exceptions[2] = "software";
+	exceptions[3] = "prefetch abort";
+	exceptions[4] = "data abort";
+	exceptions[5] = "irq";
+	exceptions[6] = "fiq";
+
+	/* Newest first; wraps below 0 (i-- brings ITEMS back to ITEMS-1). */
+	for (i = ws_trace_enter_index - 1; i != ws_trace_enter_index; i--) {
+		if (i < 0) {
+			i = WS_TRACE_ITEMS;
+			continue;
+		}
+
+		print_fn(m, "Enter: %08x    Exit: %08x (%s)\n",
+			ws_trace_enter[i], ws_trace_exit[i],
+			exceptions[ws_trace_exit_codes[i]]);
+	}
+	mutex_unlock(&ws_trace_mutex);
+
+print_trace_activity:
+#ifdef KVM_TRACE_ACTIVITY
+	/*
+	 * Print activity trace
+	 */
+	print_fn(m, "\n\nActivity circular buffer:\n");
+	print_fn(m, "-----------------------------\n");
+	for (i = activity_trace_index - 1; i != activity_trace_index; i--) {
+		if (i < 0) {
+			i = ACTIVITY_TRACE_ITEMS;
+			continue;
+		}
+
+		/* NOTE(review): activity_trace_cnt is u32 printed with %lu. */
+		print_fn(m, "%lu: \t %s\n",
+				activity_trace_cnt[i],
+				activity_trace_descr[i]);
+	}
+#endif
+
+	/*
+	 * Print event counters sorted
+	 */
+	print_fn(m, "\n\nEvent counters:\n");
+	print_fn(m, "-----------------------------\n");
+	ptr = sort_kvm_event_log();
+	while (ptr != NULL) {
+		if (ptr->event->cnt > 0) {
+			print_fn(m, "%12llu  #  %s\n", ptr->event->cnt,
+							ptr->event->descr);
+		}
+		ptr = ptr->next;
+	}
+
+	/* Balance the vcpu_load above; skipped when no VCPU registered. */
+	if (vcpu != NULL) {
+		vcpu_put(vcpu);
+	}
+}
+
+/* seq_file-compatible print callback that forwards to printk; the
+ * seq_file argument is unused.  Always returns 0. */
+static int __printk_relay(struct seq_file *m, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	return 0;
+}
+
+/* Dump the full KVM/ARM debug state to the kernel log. */
+void kvm_dump_vcpu_state(void)
+{
+	print_kvm_debug_info(__printk_relay, NULL);
+}
+
+/******************************************************************************
+ * Printk-log-wrapping functionality
+ */
+
+#define TMP_LOG_LEN 512
+static char __tmp_log_data[TMP_LOG_LEN];
+DEFINE_MUTEX(__tmp_log_lock);
+
+/*
+ * Format a message into a shared static buffer (serialized by
+ * __tmp_log_lock) and hand it to printk.  Messages that would not fit
+ * in TMP_LOG_LEN - 1 bytes are dropped with a diagnostic rather than
+ * printed truncated.
+ */
+void __kvm_print_msg(char *fmt, ...)
+{
+	va_list ap;
+	int size;
+
+	mutex_lock(&__tmp_log_lock);
+
+	va_start(ap, fmt);
+	size = vsnprintf(__tmp_log_data, TMP_LOG_LEN, fmt, ap);
+	va_end(ap);
+
+	/* vsnprintf returns < 0 on error, else the would-be length. */
+	if (size < 0 || size >= TMP_LOG_LEN)
+		printk("Message exceeded log length!\n");
+	else
+		printk("%s", __tmp_log_data);
+
+	mutex_unlock(&__tmp_log_lock);
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644
index 0000000..020240a
--- /dev/null
+++ b/arch/arm/kvm/trace.h
@@ -0,0 +1,108 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ *
+ *
+ * This file contains debugging and tracing functions and definitions for KVM/ARM.
+ *
+ */
+#ifndef __ARM_KVM_TRACE_H__
+#define __ARM_KVM_TRACE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+#define EVENT_GUEST_ENTER	0
+#define EVENT_GUEST_EXIT	1
+#define EVENT_VCPU_BLOCK	2
+#define EVENT_IRQ_WINDOW	3
+#define EVENT_SWITCH_MODE	4
+#define EVENT_VCPU_IRQS_ON	5
+#define EVENT_VCPU_IRQS_OFF	6
+#define EVENT_WFI		7
+#define EVENT_FLUSH_SHADOW	8
+#define EVENT_MOD_TTBR		9
+#define EVENT_READ_GUEST_ENTRY	10
+#define EVENT_MAP_GVA_TO_GFN	11
+#define EVENT_DACR_CHANGE	12
+#define EVENT_SWITCH_PRIV	13
+#define EVENT_SWITCH_USER	14
+#define EVENT_VCPU_ASID		15
+#define EVENT_LS_TRANS		16
+#define EVENT_EMUL_MRS		17
+#define EVENT_EMUL_MSR		18
+#define EVENT_EMUL_CPS		19
+#define EVENT_NEED_RESCHED	20
+#define EVENT_MCR_7_5_0		21
+#define EVENT_MCR_7_5_1		22
+#define EVENT_MCR_7_5_2		23
+#define EVENT_MCR_7_5_7		24
+#define EVENT_MCR_7_6_0		25
+#define EVENT_MCR_7_6_1		26
+#define EVENT_MCR_7_6_2		27
+#define EVENT_MCR_7_7_0		28
+#define EVENT_MCR_7_10_0	29
+#define EVENT_MCR_7_10_1	30
+#define EVENT_MCR_7_10_4	31
+#define EVENT_MCR_7_14_0	32
+#define EVENT_MCR_7_14_1	33
+#define EVENT_MCR_7_15_0	34
+#define EVENT_MCR_8_5_X		35
+#define EVENT_MCR_8_6_X		36
+#define EVENT_MCR_8_7_X		37
+#define EVENT_EMUL_LSMULT	38
+
+#define KVM_EVENTC_ITEMS	39
+
+void kvm_arm_init_eventc(void);
+void kvm_arm_count_event(unsigned int event);
+void kvm_dump_vcpu_state(void);
+
+void trace_ws_enter(u32 guest_pc);
+void trace_ws_exit(u32 guest_pc, u32 exit_code);
+
+
+#define print_fn_args struct seq_file *, const char *, ...
+void print_kvm_debug_info(int (*print_fn)(print_fn_args), struct seq_file *m);
+
+
+void __kvm_print_msg(char *_fmt, ...);
+
+/* Log an error with its source location and errno-style code, followed by
+ * a formatted message and newline. */
+#define kvm_err(err, fmt, args...) do {			\
+	__kvm_print_msg(KERN_ERR "KVM error [%s:%d]: (%d) ", \
+			__FUNCTION__, __LINE__, err); \
+	__kvm_print_msg(fmt "\n", ##args); \
+} while (0)
+
+/* Log a message prefixed with its source location (no trailing newline). */
+#define __kvm_msg(fmt, args...) do {			\
+	__kvm_print_msg(KERN_ERR "KVM [%s:%d]: ", __FUNCTION__, __LINE__); \
+	__kvm_print_msg(fmt, ##args); \
+} while (0)
+
+/* Like __kvm_msg but appends a newline. */
+#define kvm_msg(__fmt, __args...) __kvm_msg(__fmt "\n", ##__args)
+
+
+#define KVMARM_NOT_IMPLEMENTED() \
+   { \
+	    printk(KERN_ERR "KVM not implemented [%s:%d] in %s \n", \
+		   __FILE__, __LINE__, __FUNCTION__); \
+   }
+
+extern bool trace_gva_to_gfn;
+void print_shadow_mapping(struct kvm_vcpu *vcpu, gva_t gva);
+void print_ws_trace(void);
+void kvm_trace_activity(unsigned int activity, char *fmt, ...);
+
+#endif  /* __ARM_KVM_TRACE_H__ */


[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux