No reason why guests shouldn't be able to use the VBAR, which will always be
available on hosts with the Virtualization Extensions, since those in turn
require the Security Extensions.

Signed-off-by: Christoffer Dall <c.dall at virtualopensystems.com>
---
 arch/arm/include/asm/kvm_host.h |    1 +
 arch/arm/kernel/asm-offsets.c   |    1 +
 arch/arm/kvm/interrupts.S       |   12 ++++++++----
 arch/arm/kvm/reset.c            |    1 +
 4 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index caeb687..57054bf 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -88,6 +88,7 @@ enum cp15_regs {
 	c6_IFAR,		/* Instruction Fault Address Register */
 	c10_PRRR,		/* Primary Region Remap Register */
 	c10_NMRR,		/* Normal Memory Remap Register */
+	c12_VBAR,		/* Vector Base Address Register */
 	c13_CID,		/* Context ID Register */
 	c13_TID_URW,		/* Thread ID, User R/W */
 	c13_TID_URO,		/* Thread ID, User R/O */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c07c6c2..e7dc451 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -163,6 +163,7 @@ int main(void)
 	DEFINE(VCPU_IFAR,	offsetof(struct kvm_vcpu, arch.cp15[c6_IFAR]));
 	DEFINE(VCPU_PRRR,	offsetof(struct kvm_vcpu, arch.cp15[c10_PRRR]));
 	DEFINE(VCPU_NMRR,	offsetof(struct kvm_vcpu, arch.cp15[c10_NMRR]));
+	DEFINE(VCPU_VBAR,	offsetof(struct kvm_vcpu, arch.cp15[c12_VBAR]));
 	DEFINE(VCPU_CID,	offsetof(struct kvm_vcpu, arch.cp15[c13_CID]));
 	DEFINE(VCPU_TID_URW,	offsetof(struct kvm_vcpu, arch.cp15[c13_TID_URW]));
 	DEFINE(VCPU_TID_URO,	offsetof(struct kvm_vcpu, arch.cp15[c13_TID_URO]));
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index a93ba81..625ba6c 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -188,9 +188,10 @@ ENDPROC(__kvm_flush_vm_context)
 	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR
 	mrc	p15, 0, r10, c6, c0, 0	@ DFAR
 	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
+	mrc	p15, 0, r12, c12, c0, 0	@ VBAR
 
 	.if \vcpu == 0
-	push	{r2-r11}		@ Push CP15 registers
+	push	{r2-r12}		@ Push CP15 registers
 	.else
 	str	r2, [\vcpup, #VCPU_CID]
 	str	r3, [\vcpup, #VCPU_TID_URW]
@@ -202,6 +203,7 @@ ENDPROC(__kvm_flush_vm_context)
 	str	r9, [\vcpup, #VCPU_AIFSR]
 	str	r10, [\vcpup, #VCPU_DFAR]
 	str	r11, [\vcpup, #VCPU_IFAR]
+	str	r12, [\vcpup, #VCPU_VBAR]
 	.endif
 .endm
 
@@ -212,7 +214,7 @@ ENDPROC(__kvm_flush_vm_context)
  */
 .macro write_cp15_state vcpu=0, vcpup
 	.if \vcpu == 0
-	pop	{r2-r11}
+	pop	{r2-r12}
 	.else
 	ldr	r2, [\vcpup, #VCPU_CID]
 	ldr	r3, [\vcpup, #VCPU_TID_URW]
@@ -224,6 +226,7 @@ ENDPROC(__kvm_flush_vm_context)
 	ldr	r9, [\vcpup, #VCPU_AIFSR]
 	ldr	r10, [\vcpup, #VCPU_DFAR]
 	ldr	r11, [\vcpup, #VCPU_IFAR]
+	ldr	r12, [\vcpup, #VCPU_VBAR]
 	.endif
 
 	mcr	p15, 0, r2, c13, c0, 1	@ CID
@@ -236,6 +239,7 @@ ENDPROC(__kvm_flush_vm_context)
 	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
 	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
 	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
+	mcr	p15, 0, r12, c12, c0, 0	@ VBAR
 
 	.if \vcpu == 0
 	pop	{r2-r11}
@@ -267,9 +271,9 @@ ENDPROC(__kvm_flush_vm_context)
  * (hardware reset value is 0) */
 .macro set_hstr entry
 	mrc	p15, 4, r2, c1, c1, 3
-	ldr	r3, =0x9e00
+	ldr	r3, =0x8e00
 	.if \entry == 1
-	orr	r2, r2, r3		@ Trap CR{9,10,11,12,15}
+	orr	r2, r2, r3		@ Trap CR{9,10,11,15}
 	.else
 	bic	r2, r2, r3		@ Don't trap any CRx accesses
 	.endif
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index ce92d4b..78488be 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -61,6 +61,7 @@ static u32 a15_cp15_regs_reset[][2] = {
 	{ c6_IFAR, UNKNOWN },
 	{ c10_PRRR, 0x00098AA4 },
 	{ c10_NMRR, 0x44E048E0 },
+	{ c12_VBAR, 0x00000000 },
 	{ c13_CID, 0x00000000 },
 	{ c13_TID_URW, UNKNOWN },
 	{ c13_TID_URO, UNKNOWN },
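
For reference, the HSTR change in interrupts.S decomposes as follows: HSTR.Tn
(bit n) traps guest accesses to cp15 registers with CRn == n into Hyp mode, so
dropping bit 12 from the mask is what stops VBAR (CRn == 12) accesses from
trapping now that the register is context-switched. A minimal standalone C
sketch (illustration only, not part of the patch) showing how the two
constants are built:

/*
 * Illustration only: decomposition of the HSTR trap masks used above.
 * HSTR.Tn (bit n) traps guest accesses to cp15 registers with CRn == n.
 */
#include <stdio.h>

#define HSTR_T(n)	(1U << (n))

int main(void)
{
	unsigned int old_mask = HSTR_T(9) | HSTR_T(10) | HSTR_T(11) |
				HSTR_T(12) | HSTR_T(15);
	unsigned int new_mask = old_mask & ~HSTR_T(12);

	printf("old mask: %#x\n", old_mask);	/* 0x9e00: trap CR{9,10,11,12,15} */
	printf("new mask: %#x\n", new_mask);	/* 0x8e00: trap CR{9,10,11,15} */

	return 0;
}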