Instead of directly accessing the fault registers, use proper accessors
so the core code can be shared.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/include/asm/kvm_emulate.h | 21 +++++++++++++++++++++
 arch/arm/include/asm/kvm_host.h    | 14 ++++++++------
 arch/arm/kernel/asm-offsets.c      |  8 ++++----
 arch/arm/kvm/arm.c                 | 30 +++++++++++++++---------------
 arch/arm/kvm/coproc.c              | 24 ++++++++++++------------
 arch/arm/kvm/emulate.c             |  6 +++---
 arch/arm/kvm/mmio.c                | 16 ++++++++--------
 arch/arm/kvm/mmu.c                 | 16 ++++++++--------
 8 files changed, 79 insertions(+), 56 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 2e2ca19..df41a27 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>

 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
@@ -61,4 +62,24 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }

+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 149c92a..97274ce 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -78,6 +78,13 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };

+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrom Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;

@@ -91,9 +98,7 @@ struct kvm_vcpu_arch {
 	u32 midr;

 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrom Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;

 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
 	struct vfp_hard_struct vfp_guest;
@@ -123,9 +128,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */

-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 };
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index cc5e6af..6840ae8 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -161,10 +161,10 @@ int main(void)
   DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
-  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
-  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
-  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
+  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
+  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
+  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 79ea0b5..d19cf10 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -470,9 +470,9 @@
 	 * Guest called HVC instruction:
 	 * Let it know we don't want that by injecting an undefined exception.
 	 */
-	kvm_debug("hvc: %x (at %08lx)", vcpu->arch.hsr & ((1 << 16) - 1),
+	kvm_debug("hvc: %x (at %08lx)", kvm_vcpu_get_hsr(vcpu) & ((1 << 16) - 1),
 		  *vcpu_pc(vcpu));
-	kvm_debug("         HSR: %8x", vcpu->arch.hsr);
+	kvm_debug("         HSR: %8x", kvm_vcpu_get_hsr(vcpu));
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
@@ -488,16 +488,16 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
+	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
 	return -EFAULT;
 }

 static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
+	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
 	return -EFAULT;
 }
@@ -534,17 +534,17 @@ static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 	 * catch undefined instructions, and then we won't get past
 	 * the arm_exit_handlers test anyway.
 	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
+	BUG_ON(((kvm_vcpu_get_hsr(vcpu) & HSR_EC) >> HSR_EC_SHIFT) == 0);

 	/* Top two bits non-zero? Unconditional. */
-	if (vcpu->arch.hsr >> 30)
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
 		return true;

 	cpsr = *vcpu_cpsr(vcpu);

 	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
+	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
 	else {
 		/* This can happen in Thumb mode: examine IT state. */
 		unsigned long it;
@@ -577,20 +577,20 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_IRQ:
 		return 1;
 	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
+		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+			kvm_vcpu_get_hyp_pc(vcpu));
 		BUG();
 		panic("KVM: Hypervisor undefined exception!\n");
 	case ARM_EXCEPTION_DATA_ABORT:
 	case ARM_EXCEPTION_PREF_ABORT:
 	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+		hsr_ec = (kvm_vcpu_get_hsr(vcpu) & HSR_EC) >> HSR_EC_SHIFT;

 		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
 		    || !arm_exit_handlers[hsr_ec]) {
 			kvm_err("Unkown exception class: %#08lx, "
 				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
+				(unsigned int)kvm_vcpu_get_hsr(vcpu));
 			BUG();
 		}

@@ -599,7 +599,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 * that fail their condition code check"
 		 */
 		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
+			bool is_wide = kvm_vcpu_get_hsr(vcpu) & HSR_IL;
 			kvm_skip_instr(vcpu, is_wide);
 			return 1;
 		}
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index f4933e2..f97fa24 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -289,7 +289,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
-			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+			kvm_skip_instr(vcpu, (kvm_vcpu_get_hsr(vcpu) >> 25) & 1);
 			return 1;
 		}
 		/* If access function fails, it should complain. */
@@ -311,14 +311,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = true;
-	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 	params.Op2 = 0;
-	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRn = 0;

 	return emulate_cp15(vcpu, &params);
@@ -343,14 +343,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = false;
-	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;

 	return emulate_cp15(vcpu, &params);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 93c9b07..cc99ce5 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -551,7 +551,7 @@ static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
 	bool P = (instr >> 10) & 1;
 	bool U = (instr >> 9) & 1;
 	u8 imm8 = instr & 0xff;
-	u32 offset_addr = vcpu->arch.hxfar;
+	u32 offset_addr = kvm_vcpu_get_hfar(vcpu);
 	u8 Rn = (instr >> 16) & 0xf;

 	vcpu->arch.mmio.rd = (instr >> 12) & 0xf;
@@ -717,13 +717,13 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08lx (T=0)"
 			  "pc: %#08lx)\n", instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	} else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, mmio)) {
 		kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08lx (T=1)"
 			  "pc: %#08lx)\n", instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 44afad4..516fe27 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -64,19 +64,19 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long rd, len;
 	bool is_write, sign_extend;

-	if ((vcpu->arch.hsr >> 8) & 1) {
+	if ((kvm_vcpu_get_hsr(vcpu) >> 8) & 1) {
 		/* cache operation on I/O addr, tell guest unsupported */
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	if ((vcpu->arch.hsr >> 7) & 1) {
+	if ((kvm_vcpu_get_hsr(vcpu) >> 7) & 1) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
 	case 0:
 		len = 1;
 		break;
@@ -91,9 +91,9 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}

-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rd = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+	is_write = kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+	sign_extend = kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+	rd = (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;

 	if (kvm_vcpu_reg_is_pc(vcpu, rd)) {
 		/* IO memory trying to read/write pc */
@@ -111,7 +111,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * The MMIO instruction is emulated and should not be re-executed
 	 * in the guest.
 	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	kvm_skip_instr(vcpu, (kvm_vcpu_get_hsr(vcpu) >> 25) & 1);
 	return 0;
 }

@@ -129,7 +129,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	 * space do its magic.
 	 */

-	if (vcpu->arch.hsr & HSR_ISV)
+	if (kvm_vcpu_get_hsr(vcpu) & HSR_ISV)
 		ret = decode_hsr(vcpu, fault_ipa, &mmio);
 	else
 		ret = kvm_emulate_mmio_ls(vcpu, fault_ipa, &mmio);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 0ab098e..ec7331e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -533,7 +533,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

 	if (is_iabt)
 		write_fault = false;
-	else if ((vcpu->arch.hsr & HSR_ISV) && !(vcpu->arch.hsr & HSR_WNR))
+	else if ((kvm_vcpu_get_hsr(vcpu) & HSR_ISV) && !(kvm_vcpu_get_hsr(vcpu) & HSR_WNR))
 		write_fault = false;
 	else
 		write_fault = true;
@@ -602,15 +602,15 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	gfn_t gfn;
 	int ret;

-	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
+	hsr_ec = kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
 	is_iabt = (hsr_ec == HSR_EC_IABT);
-	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

-	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
-			      vcpu->arch.hxfar, fault_ipa);
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

 	/* Check the stage-2 fault is trans. fault or write fault */
-	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+	fault_status = (kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE);
 	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
 		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
 			hsr_ec, fault_status);
@@ -621,7 +621,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
 		if (is_iabt) {
 			/* Prefetch Abort on I/O address */
-			kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 			return 1;
 		}

@@ -632,7 +632,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)

 	/* Adjust page offset */
-	fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK;

 	return io_mem_abort(vcpu, run, fault_ipa, memslot);
 }
-- 
1.7.12
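
To illustrate the point of the change for readers outside the tree: common
code now compiles against a handful of inline accessors rather than a
concrete register layout, so another port could reuse it by supplying the
same accessors over its own fault record. A minimal, self-contained C
sketch of that pattern (simplified names and a made-up HSR value, not
kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define HSR_EC_SHIFT	26
	#define HSR_EC		(0x3fU << HSR_EC_SHIFT)

	/* Per-architecture fault record; a 64-bit port would define its own. */
	struct vcpu_fault_info {
		uint32_t hsr;	/* Hyp Syndrome Register */
		uint32_t hxfar;	/* Hyp Data/Inst. Fault Address Register */
	};

	struct vcpu {
		struct vcpu_fault_info fault;
	};

	/* The accessor is the only code that knows the layout. */
	static inline uint32_t vcpu_get_hsr(const struct vcpu *vcpu)
	{
		return vcpu->fault.hsr;
	}

	/* "Core" code: sharable, since it never touches vcpu->fault directly. */
	static unsigned int exception_class(const struct vcpu *vcpu)
	{
		return (vcpu_get_hsr(vcpu) & HSR_EC) >> HSR_EC_SHIFT;
	}

	int main(void)
	{
		struct vcpu v = { .fault = { .hsr = 0x90000000 } };

		printf("EC = %#x\n", exception_class(&v));	/* prints EC = 0x24 */
		return 0;
	}

A second architecture keeps exception_class() as-is and only provides its
own vcpu_get_hsr() over a differently shaped fault record; that is the
sharing the commit message refers to.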
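
The one accessor that does more than a field read is
kvm_vcpu_get_fault_ipa(): HPFAR[31:4] holds bits [39:12] of the faulting
IPA, so masking off the low nibble and shifting left by 8 yields the
page-aligned IPA, which kvm_handle_guest_abort() then completes with the
page offset taken from HxFAR. A stand-alone check of that arithmetic
(both register values are made up):

	#include <assert.h>
	#include <stdint.h>

	#define HPFAR_MASK	(~0xfU)		/* HPFAR[31:4] = IPA[39:12] */
	#define PAGE_MASK	(~0xfffU)

	int main(void)
	{
		uint32_t hpfar = 0x00089ab0;	/* hypothetical HPFAR value */
		uint32_t hxfar = 0xc0001234;	/* hypothetical faulting address */

		/* kvm_vcpu_get_fault_ipa(): page-aligned IPA from HPFAR */
		uint64_t fault_ipa = (uint64_t)(hpfar & HPFAR_MASK) << 8;
		assert(fault_ipa == 0x089ab000);

		/* kvm_handle_guest_abort(): fold in the page offset */
		fault_ipa |= hxfar & ~PAGE_MASK;
		assert(fault_ipa == 0x089ab234);

		return 0;
	}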