On Mon, Nov 5, 2012 at 11:28 AM, Marc Zyngier <marc.zyngier@xxxxxxx> wrote:
> On 04/11/12 22:59, Christoffer Dall wrote:
>> Prepare to factor out load/store instruction decoding by using struct
>> kvm_decode and operate on a separate copy of struct pt_regs.
>
> I quite like the approach, except for a couple of points below.
>
>> Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
>> ---
>>  arch/arm/include/asm/kvm_host.h |   12 ++++--
>>  arch/arm/kvm/emulate.c          |   84 ++++++++++++++++++++++++---------------
>>  arch/arm/kvm/mmio.c             |   22 +++++----
>>  3 files changed, 70 insertions(+), 48 deletions(-)
>>
>> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
>> index 6c1c6fc..439deb0 100644
>> --- a/arch/arm/include/asm/kvm_host.h
>> +++ b/arch/arm/include/asm/kvm_host.h
>> @@ -78,6 +78,13 @@ struct kvm_mmu_memory_cache {
>>  	void *objects[KVM_NR_MEM_OBJS];
>>  };
>>
>> +struct kvm_decode {
>> +	struct pt_regs *regs;
>> +	unsigned long hxfar;
>
> Can we call this field "far" instead? We don't need to know it comes
> from the hypervisor, and it will always be data.
>

yes

>> +	unsigned long rt;	/* destination register for loads */
>> +	bool sign_extend;	/* for byte/halfword loads */
>> +};
>> +
>>  struct kvm_vcpu_arch {
>>  	struct kvm_regs regs;
>>
>> @@ -115,10 +122,7 @@ struct kvm_vcpu_arch {
>>  	bool pause;
>>
>>  	/* IO related fields */
>> -	struct {
>> -		bool sign_extend;	/* for byte/halfword loads */
>> -		u32 rd;
>> -	} mmio;
>> +	struct kvm_decode mmio_decode;
>>
>>  	/* Interrupt related fields */
>>  	u32 irq_lines;		/* IRQ and FIQ levels */
>> diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
>> index ca72be9..7cbdc68 100644
>> --- a/arch/arm/kvm/emulate.c
>> +++ b/arch/arm/kvm/emulate.c
>> @@ -304,7 +304,7 @@ struct arm_instr {
>>  	bool sign_extend;
>>  	bool w;
>>
>> -	bool (*decode)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +	bool (*decode)(struct kvm_decode *decode, struct kvm_exit_mmio *mmio,
>>  		       unsigned long instr, struct arm_instr *ai);
>>  };
>>
>> @@ -380,7 +380,7 @@ u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
>>  	return value & mask;
>>  }
>>
>> -static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_arm_wb(struct kvm_decode *decode, struct kvm_exit_mmio *mmio,
>>  			  unsigned long instr, const struct arm_instr *ai)
>>  {
>>  	u8 Rt = (instr >> 12) & 0xf;
>> @@ -388,7 +388,7 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  	u8 W = (instr >> 21) & 1;
>>  	u8 U = (instr >> 23) & 1;
>>  	u8 P = (instr >> 24) & 1;
>> -	u32 base_addr = *vcpu_reg(vcpu, Rn);
>> +	u32 base_addr = decode->regs->uregs[Rn];
>
> Can we please have accessors in some include file? At least for stuff
> that is architecture specific? Something like:
>
> static inline unsigned long *kvm_decode_reg(struct kvm_decode *decode,
>                                             int reg)
> {
> 	return &decode->regs->uregs[reg];
> }
>
> so it is broadly equivalent to vcpu_reg? This would save me a lot of
> hassle for arm64.
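[Illustration only, not part of the original thread or patch: a self-contained sketch of the accessor idea Marc describes above. The struct definitions here are made-up minimal stand-ins, not the real kernel headers; the point is just that the decoder touches guest registers only through the helper, so another architecture can supply its own pt_regs layout.]

/* Standalone sketch -- minimal stand-ins for the kernel types. */
#include <stdio.h>

struct pt_regs { unsigned long uregs[18]; };

struct kvm_decode {
	struct pt_regs *regs;
	unsigned long hxfar;
};

static inline unsigned long *kvm_decode_reg(struct kvm_decode *decode, int reg)
{
	return &decode->regs->uregs[reg];
}

int main(void)
{
	struct pt_regs regs = { .uregs = { [2] = 0x80000000UL } };
	struct kvm_decode decode = { .regs = &regs };

	/* e.g. a post-indexed writeback: Rn += offset, via the accessor */
	*kvm_decode_reg(&decode, 2) += 4;
	printf("r2 = %#lx\n", *kvm_decode_reg(&decode, 2));
	return 0;
}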
>

sure

>>  	u32 offset_addr, offset;
>>
>>  	/*
>> @@ -403,14 +403,14 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  		return false;
>>  	}
>>
>> -	vcpu->arch.mmio.rd = Rt;
>> +	decode->rt = Rt;
>>
>>  	if (ai->register_form) {
>>  		/* Register operation */
>>  		enum SRType s_type;
>>  		u8 shift_n;
>> -		bool c_bit = *vcpu_cpsr(vcpu) & PSR_C_BIT;
>> -		u32 s_reg = *vcpu_reg(vcpu, ai->Rm);
>> +		bool c_bit = decode->regs->ARM_cpsr & PSR_C_BIT;
>
> static inline unsigned long kvm_decode_cpsr(struct kvm_decode *decode)
> {
> 	return decode->regs->ARM_cpsr;
> }
>

if this will make things easier for arm64, sure, otherwise I'm not sure
I agree this improves readability. (I thought you weren't going to
support this type of stuff for arm64?)

>> +		u32 s_reg = decode->regs->uregs[ai->Rm];
>>
>>  		s_type = decode_imm_shift(ai->type, ai->shift_n, &shift_n);
>>  		offset = shift(s_reg, 5, s_type, shift_n, c_bit);
>> @@ -424,18 +424,18 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  		offset_addr = base_addr + offset;
>>  	else
>>  		offset_addr = base_addr - offset;
>> -	*vcpu_reg(vcpu, Rn) = offset_addr;
>> +	decode->regs->uregs[Rn] = offset_addr;
>>  	return true;
>>  }
>>
>> -static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_arm_ls(struct kvm_decode *decode, struct kvm_exit_mmio *mmio,
>>  			  unsigned long instr, struct arm_instr *ai)
>>  {
>>  	u8 A = (instr >> 25) & 1;
>>
>>  	mmio->is_write = ai->w;
>>  	mmio->len = ai->len;
>> -	vcpu->arch.mmio.sign_extend = false;
>> +	decode->sign_extend = false;
>>
>>  	ai->register_form = A;
>>  	ai->imm = instr & 0xfff;
>> @@ -443,15 +443,16 @@ static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  	ai->type = (instr >> 5) & 0x3;
>>  	ai->shift_n = (instr >> 7) & 0x1f;
>>
>> -	return decode_arm_wb(vcpu, mmio, instr, ai);
>> +	return decode_arm_wb(decode, mmio, instr, ai);
>>  }
>>
>> -static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_arm_extra(struct kvm_decode *decode,
>> +			     struct kvm_exit_mmio *mmio,
>>  			     unsigned long instr, struct arm_instr *ai)
>>  {
>>  	mmio->is_write = ai->w;
>>  	mmio->len = ai->len;
>> -	vcpu->arch.mmio.sign_extend = ai->sign_extend;
>> +	decode->sign_extend = ai->sign_extend;
>>
>>  	ai->register_form = !((instr >> 22) & 1);
>>  	ai->imm = ((instr >> 4) & 0xf0) | (instr & 0xf);
>> @@ -459,7 +460,7 @@ static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  	ai->type = 0; /* SRType_LSL */
>>  	ai->shift_n = 0;
>>
>> -	return decode_arm_wb(vcpu, mmio, instr, ai);
>> +	return decode_arm_wb(decode, mmio, instr, ai);
>>  }
>>
>>  /*
>> @@ -526,7 +527,7 @@ static const struct arm_instr arm_instr[] = {
>>  			.sign_extend = true , .decode = decode_arm_extra },
>>  };
>>
>> -static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
>> +static bool kvm_decode_arm_ls(struct kvm_decode *decode, unsigned long instr,
>>  			      struct kvm_exit_mmio *mmio)
>>  {
>>  	int i;
>> @@ -535,7 +536,7 @@ static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
>>  		const struct arm_instr *ai = &arm_instr[i];
>>  		if ((instr & ai->opc_mask) == ai->opc) {
>>  			struct arm_instr ai_copy = *ai;
>> -			return ai->decode(vcpu, mmio, instr, &ai_copy);
>> +			return ai->decode(decode, mmio, instr, &ai_copy);
>>  		}
>>  	}
>>  	return false;
>> @@ -557,40 +558,42 @@ struct thumb_instr {
>>  	} t32;
>>  };
>>
>> -	bool (*decode)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +	bool (*decode)(struct kvm_decode *decode, struct kvm_exit_mmio *mmio,
>>  		       unsigned long instr, const struct thumb_instr *ti);
>>  };
>>
>> -static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_thumb_wb(struct kvm_decode *decode,
>> +			    struct kvm_exit_mmio *mmio,
>>  			    unsigned long instr)
>>  {
>>  	bool P = (instr >> 10) & 1;
>>  	bool U = (instr >> 9) & 1;
>>  	u8 imm8 = instr & 0xff;
>> -	u32 offset_addr = vcpu->arch.hxfar;
>> +	u32 offset_addr = decode->hxfar;
>>  	u8 Rn = (instr >> 16) & 0xf;
>>
>> -	vcpu->arch.mmio.rd = (instr >> 12) & 0xf;
>> +	decode->rt = (instr >> 12) & 0xf;
>>
>> -	if (kvm_vcpu_reg_is_pc(vcpu, Rn))
>> +	if (Rn == 15)
>>  		return false;
>>
>>  	/* Handle Writeback */
>>  	if (!P && U)
>> -		*vcpu_reg(vcpu, Rn) = offset_addr + imm8;
>> +		decode->regs->uregs[Rn] = offset_addr + imm8;
>>  	else if (!P && !U)
>> -		*vcpu_reg(vcpu, Rn) = offset_addr - imm8;
>> +		decode->regs->uregs[Rn] = offset_addr - imm8;
>>  	return true;
>>  }
>>
>> -static bool decode_thumb_str(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_thumb_str(struct kvm_decode *decode,
>> +			     struct kvm_exit_mmio *mmio,
>>  			     unsigned long instr, const struct thumb_instr *ti)
>>  {
>>  	u8 op1 = (instr >> (16 + 5)) & 0x7;
>>  	u8 op2 = (instr >> 6) & 0x3f;
>>
>>  	mmio->is_write = true;
>> -	vcpu->arch.mmio.sign_extend = false;
>> +	decode->sign_extend = false;
>>
>>  	switch (op1) {
>>  	case 0x0: mmio->len = 1; break;
>> @@ -602,13 +605,14 @@ static bool decode_thumb_str(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>
>>  	if ((op2 & 0x24) == 0x24) {
>>  		/* STRB (immediate, thumb, W=1) */
>> -		return decode_thumb_wb(vcpu, mmio, instr);
>> +		return decode_thumb_wb(decode, mmio, instr);
>>  	}
>>
>>  	return false;
>>  }
>>
>> -static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>> +static bool decode_thumb_ldr(struct kvm_decode *decode,
>> +			     struct kvm_exit_mmio *mmio,
>>  			     unsigned long instr, const struct thumb_instr *ti)
>>  {
>>  	u8 op1 = (instr >> (16 + 7)) & 0x3;
>> @@ -623,15 +627,15 @@ static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
>>  	}
>>
>>  	if (op1 == 0x0)
>> -		vcpu->arch.mmio.sign_extend = false;
>> +		decode->sign_extend = false;
>>  	else if (op1 == 0x2 && (ti->t32.op2 & 0x7) != 0x5)
>> -		vcpu->arch.mmio.sign_extend = true;
>> +		decode->sign_extend = true;
>>  	else
>>  		return false; /* Only register write-back versions! */
>>
>>  	if ((op2 & 0x24) == 0x24) {
>>  		/* LDR{S}X (immediate, thumb, W=1) */
>> -		return decode_thumb_wb(vcpu, mmio, instr);
>> +		return decode_thumb_wb(decode, mmio, instr);
>>  	}
>>
>>  	return false;
>> @@ -663,7 +667,7 @@ static const struct thumb_instr thumb_instr[] = {
>>
>>
>>
>> -static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
>> +static bool kvm_decode_thumb_ls(struct kvm_decode *decode, unsigned long instr,
>>  				struct kvm_exit_mmio *mmio)
>>  {
>>  	bool is32 = is_wide_instruction(instr);
>> @@ -693,7 +697,7 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
>>  			continue;
>>  		}
>>
>> -		return ti->decode(vcpu, mmio, instr, &tinstr);
>> +		return ti->decode(decode, mmio, instr, &tinstr);
>>  	}
>>
>>  	return false;
>> @@ -720,6 +724,8 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  {
>>  	bool is_thumb;
>>  	unsigned long instr = 0;
>> +	struct pt_regs current_regs;
>> +	struct kvm_decode *decode = &vcpu->arch.mmio_decode;
>>
>>  	trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
>>
>> @@ -728,14 +734,22 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  		return 1;
>>
>>  	mmio->phys_addr = fault_ipa;
>> +
>> +	memcpy(&current_regs, &vcpu->arch.regs.usr_regs, sizeof(current_regs));
>> +	current_regs.ARM_sp = *vcpu_reg(vcpu, 13);
>> +	current_regs.ARM_lr = *vcpu_reg(vcpu, 14);
>> +
>> +	decode->regs = &current_regs;
>> +	decode->hxfar = vcpu->arch.hxfar;
>> +
>>  	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
>> -	if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, mmio)) {
>> +	if (!is_thumb && !kvm_decode_arm_ls(decode, instr, mmio)) {
>>  		kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=0)"
>>  			  "pc: %#08x)\n",
>>  			  instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
>>  		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
>>  		return 1;
>> -	} else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, mmio)) {
>> +	} else if (is_thumb && !kvm_decode_thumb_ls(decode, instr, mmio)) {
>>  		kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=1)"
>>  			  "pc: %#08x)\n",
>>  			  instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
>> @@ -743,6 +757,10 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  		return 1;
>>  	}
>>
>> +	memcpy(&vcpu->arch.regs.usr_regs, &current_regs, sizeof(current_regs));
>> +	*vcpu_reg(vcpu, 13) = current_regs.ARM_sp;
>> +	*vcpu_reg(vcpu, 14) = current_regs.ARM_lr;
>> +
>>  	/*
>>  	 * The MMIO instruction is emulated and should not be re-executed
>>  	 * in the guest.
>> diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
>> index beb7134..eefbead 100644
>> --- a/arch/arm/kvm/mmio.c
>> +++ b/arch/arm/kvm/mmio.c
>> @@ -37,7 +37,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
>>  	int mask;
>>
>>  	if (!run->mmio.is_write) {
>> -		dest = vcpu_reg(vcpu, vcpu->arch.mmio.rd);
>> +		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
>>  		memset(dest, 0, sizeof(int));
>>
>>  		len = run->mmio.len;
>> @@ -49,7 +49,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
>>  		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
>>  			       *((u64 *)run->mmio.data));
>>
>> -		if (vcpu->arch.mmio.sign_extend && len < 4) {
>> +		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
>>  			mask = 1U << ((len * 8) - 1);
>>  			*dest = (*dest ^ mask) - mask;
>>  		}
>> @@ -61,7 +61,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
>>  static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  		      struct kvm_exit_mmio *mmio)
>>  {
>> -	unsigned long rd, len;
>> +	unsigned long rt, len;
>>  	bool is_write, sign_extend;
>>
>>  	if ((vcpu->arch.hsr >> 8) & 1) {
>> @@ -93,9 +93,9 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>
>>  	is_write = vcpu->arch.hsr & HSR_WNR;
>>  	sign_extend = vcpu->arch.hsr & HSR_SSE;
>> -	rd = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
>> +	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
>>
>> -	if (kvm_vcpu_reg_is_pc(vcpu, rd)) {
>> +	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
>>  		/* IO memory trying to read/write pc */
>>  		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
>>  		return 1;
>> @@ -104,8 +104,8 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  	mmio->is_write = is_write;
>>  	mmio->phys_addr = fault_ipa;
>>  	mmio->len = len;
>> -	vcpu->arch.mmio.sign_extend = sign_extend;
>> -	vcpu->arch.mmio.rd = rd;
>> +	vcpu->arch.mmio_decode.sign_extend = sign_extend;
>> +	vcpu->arch.mmio_decode.rt = rt;
>>
>>  	/*
>>  	 * The MMIO instruction is emulated and should not be re-executed
>> @@ -119,7 +119,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
>>  		 phys_addr_t fault_ipa, struct kvm_memory_slot *memslot)
>>  {
>>  	struct kvm_exit_mmio mmio;
>> -	unsigned long rd;
>> +	unsigned long rt;
>>  	int ret;
>>
>>  	/*
>> @@ -137,14 +137,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
>>  	if (ret != 0)
>>  		return ret;
>>
>> -	rd = vcpu->arch.mmio.rd;
>> +	rt = vcpu->arch.mmio_decode.rt;
>>  	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
>>  		       KVM_TRACE_MMIO_READ_UNSATISFIED,
>>  		       mmio.len, fault_ipa,
>> -		       (mmio.is_write) ? *vcpu_reg(vcpu, rd) : 0);
>> +		       (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
>>
>>  	if (mmio.is_write)
>> -		memcpy(mmio.data, vcpu_reg(vcpu, rd), mmio.len);
>> +		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
>>
>>  	if (vgic_handle_mmio(vcpu, run, &mmio))
>>  		return 1;
>
> Looks OK otherwise.
>

Thanks for the review!

-Christoffer
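[Illustration only, not part of the original thread or patch: the (*dest ^ mask) - mask expression quoted above in kvm_handle_mmio_return() is a branch-free sign extension of a 1- or 2-byte MMIO read. The sketch below is plain userspace C with a made-up helper name, sign_extend_mmio(), that mirrors that expression.]

/* Standalone sketch, not kernel code: XOR the narrow value with its sign bit,
 * then subtract the sign bit again; this propagates the sign through the
 * upper bits of the int when len is 1 or 2 bytes.
 */
#include <stdio.h>

static int sign_extend_mmio(int val, int len)
{
	int mask = 1U << ((len * 8) - 1);	/* sign bit of the len-byte value */

	return (val ^ mask) - mask;
}

int main(void)
{
	printf("%d\n", sign_extend_mmio(0xfe, 1));	/* 1-byte 0xfe   -> -2 */
	printf("%d\n", sign_extend_mmio(0x7fff, 2));	/* 2-byte 0x7fff -> 32767 */
	printf("%d\n", sign_extend_mmio(0x8000, 2));	/* 2-byte 0x8000 -> -32768 */
	return 0;
}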