Sets up the required registers to run code in HYP-mode from the kernel. No major controversies, but we should consider how to deal with SMP support for hypervisor stack page. Works by setting the HVBAR so that the kernel can execute code in Hyp-mode with the MMU disabled; that code then initializes the other Hyp registers and enables the MMU for Hyp-mode. Unfortunately, this commit doesn't yet work as the configured translation is somehow faulty. --- arch/arm/include/asm/kvm.h | 1 arch/arm/include/asm/kvm_arm.h | 80 ++++++++++++++++++++++++++++ arch/arm/include/asm/kvm_asm.h | 12 ++++ arch/arm/kvm/arm.c | 116 ++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/arm_interrupts.S | 94 ++++++++++++++++++++++++++++++++ 5 files changed, 302 insertions(+), 1 deletions(-) create mode 100644 arch/arm/include/asm/kvm_arm.h diff --git a/arch/arm/include/asm/kvm.h b/arch/arm/include/asm/kvm.h index 2974b96..8311198 100644 --- a/arch/arm/include/asm/kvm.h +++ b/arch/arm/include/asm/kvm.h @@ -1,4 +1,3 @@ - /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h new file mode 100644 index 0000000..4a8255a --- /dev/null +++ b/arch/arm/include/asm/kvm_arm.h @@ -0,0 +1,80 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef __KVM_ARM_H__ +#define __KVM_ARM_H__ + +#include <asm/types.h> + +/* Hyp Configuration Register (HCR) bits */ +#define HCR_TGE (1 << 27) +#define HCR_TVM (1 << 26) +#define HCR_TTLB (1 << 25) +#define HCR_TPU (1 << 24) +#define HCR_TPC (1 << 23) +#define HCR_TSW (1 << 22) +#define HCR_TAC (1 << 21) +#define HCR_TIDCP (1 << 20) +#define HCR_TSC (1 << 19) +#define HCR_TID3 (1 << 18) +#define HCR_TID2 (1 << 17) +#define HCR_TID1 (1 << 16) +#define HCR_TID0 (1 << 15) +#define HCR_TWE (1 << 14) +#define HCR_TWI (1 << 13) +#define HCR_DC (1 << 12) +#define HCR_BSU (3 << 10) +#define HCR_FB (1 << 9) +#define HCR_VA (1 << 8) +#define HCR_VI (1 << 7) +#define HCR_VF (1 << 6) +#define HCR_AMO (1 << 5) +#define HCR_IMO (1 << 4) +#define HCR_FMO (1 << 3) +#define HCR_PTW (1 << 2) +#define HCR_SWIO (1 << 1) +#define HCR_VM 1 + +/* Hyp System Control Register (HSCTLR) bits */ +#define HSCTLR_TE (1 << 30) +#define HSCTLR_EE (1 << 25) +#define HSCTLR_FI (1 << 21) +#define HSCTLR_WXN (1 << 19) +#define HSCTLR_I (1 << 12) +#define HSCTLR_C (1 << 2) +#define HSCTLR_A (1 << 1) +#define HSCTLR_M 1 +#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \ + HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE) + +/* TTBCR and HTCR Registers bits */ +#define TTBCR_EAE (1 << 31) +#define TTBCR_IMP (1 << 30) +#define TTBCR_SH1 (3 << 28) +#define TTBCR_ORGN1 (3 << 26) +#define TTBCR_IRGN1 (3 << 24) +#define TTBCR_EPD1 (1 << 23) +#define TTBCR_A1 (1 << 22) +#define TTBCR_T1SZ (3 << 16) +#define TTBCR_SH0 (3 << 12) +#define TTBCR_ORGN0 (3 << 10) +#define TTBCR_IRGN0 (3 << 8) +#define TTBCR_EPD0 (1 << 7) +#define TTBCR_T0SZ 3 +#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) + + +#endif /* __KVM_ARM_H__ */ diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index a612f68..e763da3 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -38,5 +38,17 @@ #define ARM_EXCEPTION_DATA_ABORT 4 #define 
ARM_EXCEPTION_IRQ 5 #define ARM_EXCEPTION_FIQ 6 +#define ARM_EXCEPTION_HVC 7 + +/* + * SMC Hypervisor API call numbers + */ +#ifdef __ASSEMBLY__ +#define SMC_HYP_CALL(n, x) .equ n, x +#else /* !__ASSEMBLY__ */ +#define SMC_HYP_CALL(n, x) asm(".equ " #n ", " #x); +#endif /* __ASSEMBLY__ */ + +SMC_HYP_CALL(SMCHYP_HVBAR_W , 0xfffffff0) #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 7770342..bf0515e 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -22,19 +22,130 @@ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/mm.h> #include <linux/mman.h> +#include <linux/sched.h> +#include <asm/unified.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/mman.h> +#include <asm/tlbflush.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmu.h> +#include "../mm/mm.h" #include "trace.h" +static pgd_t *kvm_hyp_pgd = NULL; +static bool kvm_arm_hardware_enabled = false; +static void *kvm_arm_hyp_stack_page = NULL; +extern unsigned long __kvm_hyp_vector; +extern unsigned long __kvm_hyp_init, __kvm_hyp_init_end; +extern struct mm_struct init_mm; + +/* The VMID used in the VTTBR */ +#define VMID_SIZE (1<<8) +static DECLARE_BITMAP(kvm_vmids, VMID_SIZE); +static DEFINE_MUTEX(kvm_vmids_mutex); + int kvm_arch_hardware_enable(void *garbage) { + int err = 0; + unsigned long vector_ptr, hyp_stack_ptr; + phys_addr_t init_phys_addr, init_end_phys_addr; + pgprot_t prot; + unsigned long l1_addr, l1_descr, l2_addr, l2_descr, phys_addr; + + if (kvm_arm_hardware_enabled) + return 0; + + /* + * Allocate Hyp level-1 page table + */ + kvm_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + if (!kvm_hyp_pgd) + return -ENOMEM; + + /* + * Allocate stack page for Hypervisor-mode + */ + kvm_arm_hyp_stack_page = (void *)__get_free_page(GFP_KERNEL); + if (!kvm_arm_hyp_stack_page) { + err = -ENOMEM; + goto out_free_pgd; + } + + hyp_stack_ptr = (unsigned 
long)kvm_arm_hyp_stack_page + PAGE_SIZE; + + init_phys_addr = virt_to_phys((void *)&__kvm_hyp_init); + init_end_phys_addr = virt_to_phys((void *)&__kvm_hyp_init_end); + + /* + * Create identity mapping + */ + hyp_identity_mapping_add(kvm_hyp_pgd, + (unsigned long)init_phys_addr, + (unsigned long)init_end_phys_addr); + + /* + * Set the HVBAR + */ + BUG_ON(init_phys_addr & 0x1f); + asm volatile ( + "mov r0, %[vector_ptr]\n\t" + "ldr r7, =SMCHYP_HVBAR_W\n\t" + "smc #0\n\t" : + : [vector_ptr] "r" ((unsigned long)init_phys_addr) + : "r0", "r7"); + + /* + * Call initialization code + */ + asm volatile ( + "mov r0, %[pgd_ptr]\n\t" + "mov r1, %[stack_ptr]\n\t" + "hvc #0\n\t" : + : [pgd_ptr] "r" (virt_to_phys(kvm_hyp_pgd)), + [stack_ptr] "r" (hyp_stack_ptr) + : "r0", "r1"); + + /* + * Unmap the identity mapping and point to kernel pmd's + */ + hyp_identity_mapping_del(kvm_hyp_pgd, + (unsigned long)init_phys_addr, + (unsigned long)init_end_phys_addr); + /* The kernel's pgd is configured to point to the pmd entries + * irrespectively of the fact that the T1SZ is configured so that TTBR1 + * points directly to the level+2 pmds. 
*/ + memcpy(kvm_hyp_pgd, init_mm.pgd, PTRS_PER_PGD * sizeof(pgd_t)); + flush_tlb_all(); + + /* + * Set the HVBAR to the virtual kernel address + */ + vector_ptr = (unsigned long)&__kvm_hyp_vector; + asm volatile ( + "mov r0, %[vector_ptr]\n\t" + "ldr r7, =SMCHYP_HVBAR_W\n\t" + "smc #0\n\t" : + : [vector_ptr] "r" ((unsigned long)vector_ptr) + : "r0", "r7"); + + __asm__ volatile ("dsb\n\t" + "isb\n\t"); + + kvm_arm_hardware_enabled = true; + return 0; +out_free_pgd: + kfree(kvm_hyp_pgd); + return err; } void kvm_arch_hardware_disable(void *garbage) { + /* There is no need for this now, so we just ignore that */ } int kvm_arch_hardware_setup(void) @@ -224,6 +335,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return -EINVAL; } +int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) +{ + return (!v->arch.wait_for_interrupts); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { KVMARM_NOT_IMPLEMENTED(); diff --git a/arch/arm/kvm/arm_interrupts.S b/arch/arm/kvm/arm_interrupts.S index 073a494..5314ab8 100644 --- a/arch/arm/kvm/arm_interrupts.S +++ b/arch/arm/kvm/arm_interrupts.S @@ -13,5 +13,99 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
 * */ +#include <asm/unified.h> +#include <asm/page.h> #include <asm/asm-offsets.h> #include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> + +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +@ Hypervisor initialization +@ - should be called with: +@ r0 = Hypervisor pgd pointer +@ r1 = top of Hyp stack (kernel VA) +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + .text + .align 5 +__kvm_hyp_init: + .globl __kvm_hyp_init + + @ Hyp-mode exception vector + nop + nop + nop + nop + nop + b __do_hyp_init + nop + nop + +__do_hyp_init: + @ Copy the Hyp stack pointer + adr sp, __kvm_init_sp + push {r1, r2, r12} + + @ Set the HTTBR to be the same as the TTBR1 holding the kernel + @ level-1 page table + mrrc p15, 1, r1, r2, c2 + @mov r1, #0 + mcrr p15, 4, r0, r2, c2 + + @ Set the HTCR and VTCR to the same shareability and cacheability + @ settings as the non-secure TTBCR and with T0SZ == 0. + mrc p15, 4, r0, c2, c0, 2 + ldr r12, =HTCR_MASK + bic r0, r0, r12 + mrc p15, 0, r1, c2, c0, 2 + and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) + orr r0, r0, r1 + mcr p15, 4, r0, c2, c0, 2 + + mrc p15, 4, r1, c2, c1, 2 @ VTCR + bic r1, r1, #(VTCR_HTCR_SHARED | VTCR_SL0) + bic r0, r0, #(~VTCR_HTCR_SHARED) + orr r1, r0, r1 + orr r1, r1, #(VTCR_SL_L1 | VTCR_GUEST_T0SZ) + mcr p15, 4, r1, c2, c1, 2 @ VTCR + + @ Use the same memory attributes for hyp. accesses as the kernel + @ (copy MAIRx to HMAIRx). 
+ mrc p15, 0, r0, c10, c2, 0 + mcr p15, 4, r0, c10, c2, 0 + mrc p15, 0, r0, c10, c2, 1 + mcr p15, 4, r0, c10, c2, 1 + + @ Set the HSCTLR to: + @ - ARM/THUMB exceptions: Kernel config + @ - Endianness: Kernel config + @ - Fast Interrupt Features: Kernel config + @ - Write permission implies XN: disabled + @ - Instruction cache: enabled + @ - Data/Unified cache: enabled + @ - Memory alignment checks: enabled + @ - MMU: enabled (this code must be run from an identity mapping) + mrc p15, 4, r0, c1, c0, 0 + ldr r12, =HSCTLR_MASK + bic r0, r0, r12 + mrc p15, 0, r1, c1, c0, 0 + ldr r12, =(HSCTLR_TE | HSCTLR_EE | HSCTLR_FI) + and r1, r1, r12 + ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_I) + orr r1, r1, r12 + orr r0, r0, r1 + isb + mcr p15, 4, r0, c1, c0, 0 + isb + + @ Set stack pointer and return to the kernel + pop {r1, r2, r12} + mov sp, r1 + eret + + .ltorg + + .align 5 + .word 0 + __kvm_init_sp: + .globl __kvm_hyp_init_end +__kvm_hyp_init_end: