On 12/09/2013 10:12 AM, Peter Maydell wrote: > +static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, > + int size, bool is_signed, bool extend) > +{ > + switch (size) { > + case 0: > + if (is_signed) { > + tcg_gen_qemu_ld8s(dest, tcg_addr, get_mem_index(s)); > + } else { > + tcg_gen_qemu_ld8u(dest, tcg_addr, get_mem_index(s)); > + } > + break; > + case 1: > + if (is_signed) { > + tcg_gen_qemu_ld16s(dest, tcg_addr, get_mem_index(s)); > + } else { > + tcg_gen_qemu_ld16u(dest, tcg_addr, get_mem_index(s)); > + } > + break; > + case 2: > + if (is_signed) { > + tcg_gen_qemu_ld32s(dest, tcg_addr, get_mem_index(s)); > + } else { > + tcg_gen_qemu_ld32u(dest, tcg_addr, get_mem_index(s)); > + } > + break; > + case 3: > + tcg_gen_qemu_ld64(dest, tcg_addr, get_mem_index(s)); > + break; Likewise, combine all of this with tcg_gen_qemu_ld_i64. > + if (extend && is_signed) { > + g_assert(size < 3); > + tcg_gen_ext32u_i64(dest, dest); > + } Is it worth noticing, in the size == 2 && !extend case, that is_signed can be forced to false in order to avoid the extra extension? 
> +static void handle_ldp(DisasContext *s, uint32_t insn) > +{ > + int rt = extract32(insn, 0, 5); > + int rn = extract32(insn, 5, 5); > + int rt2 = extract32(insn, 10, 5); > + int64_t offset = sextract32(insn, 15, 7); > + int idx = extract32(insn, 23, 3); > + bool is_signed = false; > + bool is_vector = extract32(insn, 26, 1); > + int opc = extract32(insn, 30, 2); > + int size; > + bool postindex = true; > + bool wback = false; > + > + TCGv_i64 tcg_rt = cpu_reg(s, rt); > + TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); > + TCGv_i64 tcg_addr; > + > + if (opc == 3) { > + unallocated_encoding(s); > + return; > + } > + if (is_vector) { > + size = 2 + opc; > + } else { > + is_signed = opc & 1; > + size = 2 + extract32(opc, 1, 1); > + } > + > + switch (idx) { > + case 1: /* post-index */ > + postindex = true; > + wback = true; > + break; > + case 2: /* signed offset, rn not updated */ > + postindex = false; > + break; > + case 3: /* STP (pre-index) */ > + postindex = false; > + wback = true; > + break; > + default: /* Failed decoder tree? 
*/ > + unallocated_encoding(s); > + break; > + } > + > + offset <<= size; > + > + if (rn == 31) { > + gen_check_sp_alignment(s); > + } > + > + tcg_addr = tcg_temp_new_i64(); > + tcg_gen_mov_i64(tcg_addr, cpu_reg_sp(s, rn)); > + > + if (!postindex) { > + tcg_gen_addi_i64(tcg_addr, tcg_addr, offset); > + } > + > + if (is_vector) { > + do_fp_ld(s, rt, tcg_addr, size); > + } else { > + do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false); > + } > + tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size); > + if (is_vector) { > + do_fp_ld(s, rt2, tcg_addr, size); > + } else { > + do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false); > + } > + > + if (wback) { > + if (postindex) { > + tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size)); > + } else { > + tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size); > + } > + tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr); > + } > + tcg_temp_free_i64(tcg_addr); > +} There's so much overlap with STP, I think these ought not be separate functions. Just merge the middle bit where the actual load/store happens. r~ _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm