MMIO emulation reads the last instruction executed by the guest and
then emulates it. If the guest is running in Little Endian mode, the
instruction needs to be byte-swapped before being emulated.

This patch stores the last instruction in the endian order of the
host, byte-swapping it if needed. The common code which fetches
'last_inst' uses a helper routine kvmppc_need_byteswap(), and the
exit paths for the Book3S PR and HV guests use their own version in
assembly.

Finally, kvmppc_emulate_instruction() uses kvmppc_is_bigendian() to
determine the endian order in which the MMIO needs to be done.

Signed-off-by: Cédric Le Goater <clg@xxxxxxxxxx>
---
 arch/powerpc/include/asm/kvm_book3s.h   |  9 +++-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 ++++++
 arch/powerpc/kvm/book3s_segment.S       | 11 +++++
 arch/powerpc/kvm/emulate.c              | 72 +++++++++++++++++--------------
 4 files changed, 71 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 00c2061..9c2b865 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -289,7 +289,14 @@ static inline int kvmppc_ld32(struct kvm_vcpu *vcpu, ulong *eaddr,
 static inline int kvmppc_ld_inst(struct kvm_vcpu *vcpu, ulong *eaddr,
 				 u32 *inst)
 {
-	return kvmppc_ld32(vcpu, eaddr, inst, false);
+	int ret;
+
+	ret = kvmppc_ld32(vcpu, eaddr, inst, false);
+
+	if (kvmppc_need_byteswap(vcpu))
+		*inst = swab32(*inst);
+
+	return ret;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 77f1baa..7c9978a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1404,10 +1404,22 @@ fast_interrupt_c_return:
 	lwz	r8, 0(r10)
 	mtmsrd	r3
 
+	ld	r0, VCPU_MSR(r9)
+
+	/* r10 = vcpu->arch.msr & MSR_LE */
+	rldicl.	r10, r0, 0, 63
+
 	/* Store the result */
 	stw	r8, VCPU_LAST_INST(r9)
 
+	beq	after_inst_store
+
+	/* Swap and store the result */
+	addi	r11, r9, VCPU_LAST_INST
+	stwbrx	r8, 0, r11
+
 	/* Unset guest mode. */
+after_inst_store:
 	li	r0, KVM_GUEST_MODE_HOST_HV
 	stb	r0, HSTATE_IN_GUEST(r13)
 	b	guest_exit_cont
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe478..2ceed4c 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -287,8 +287,19 @@ ld_last_inst:
 	sync
 #endif
 
+	ld	r8, SVCPU_SHADOW_SRR1(r13)
+
+	/* r10 = vcpu->arch.msr & MSR_LE */
+	rldicl.	r10, r8, 0, 63
+
 	stw	r0, SVCPU_LAST_INST(r13)
 
+	beq	no_ld_last_inst
+
+	/* swap and store the result */
+	addi	r11, r13, SVCPU_LAST_INST
+	stwbrx	r0, 0, r11
+
 no_ld_last_inst:
 
 	/* Unset guest mode */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45..76d0a12 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
@@ -232,6 +231,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
+	int is_bigendian = kvmppc_is_bigendian(vcpu);
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -266,47 +266,53 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			advance = 0;
 			break;
 
 		case OP_31_XOP_LWZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 4,
+						      is_bigendian);
 			break;
 
 		case OP_31_XOP_LBZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 1,
+						      is_bigendian);
 			break;
 
 		case OP_31_XOP_LBZUX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 1,
+						      is_bigendian);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_STWX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       4, 1);
+						       4, is_bigendian);
 			break;
 
 		case OP_31_XOP_STBX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       1, 1);
+						       1, is_bigendian);
 			break;
 
 		case OP_31_XOP_STBUX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       1, 1);
+						       1, is_bigendian);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_LHAX:
-			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 2,
+						       is_bigendian);
 			break;
 
 		case OP_31_XOP_LHZX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 2,
+						      is_bigendian);
 			break;
 
 		case OP_31_XOP_LHZUX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 2,
+						      is_bigendian);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
@@ -317,13 +323,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_STHX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       2, 1);
+						       2, is_bigendian);
 			break;
 
 		case OP_31_XOP_STHUX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       2, 1);
+						       2, is_bigendian);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
@@ -342,7 +348,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_LWBRX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 4,
+						      !is_bigendian);
 			break;
 
 		case OP_31_XOP_TLBSYNC:
@@ -351,17 +358,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_31_XOP_STWBRX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       4, 0);
+						       4, !is_bigendian);
 			break;
 
 		case OP_31_XOP_LHBRX:
-			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 2,
+						      !is_bigendian);
 			break;
 
 		case OP_31_XOP_STHBRX:
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
-						       2, 0);
+						       2, !is_bigendian);
 			break;
 
 		default:
@@ -371,33 +379,33 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LWZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 4, is_bigendian);
 		break;
 
 	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
 	case OP_LD:
 		rt = get_rt(inst);
-		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 8, is_bigendian);
 		break;
 
 	case OP_LWZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 4, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LBZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 1, is_bigendian);
 		break;
 
 	case OP_LBZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 1, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STW:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       4, 1);
+					       4, is_bigendian);
 		break;
 
 	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
@@ -405,57 +413,57 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       8, 1);
+					       8, is_bigendian);
 		break;
 
 	case OP_STWU:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       4, 1);
+					       4, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STB:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       1, 1);
+					       1, is_bigendian);
 		break;
 
 	case OP_STBU:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       1, 1);
+					       1, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LHZ:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 2, is_bigendian);
 		break;
 
 	case OP_LHZU:
-		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 2, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LHA:
-		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, is_bigendian);
 		break;
 
 	case OP_LHAU:
-		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STH:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       2, 1);
+					       2, is_bigendian);
 		break;
 
 	case OP_STHU:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
-					       2, 1);
+					       2, is_bigendian);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
-- 
1.7.10.4
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
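
For reference: kvmppc_is_bigendian() and kvmppc_need_byteswap() are not
defined in this diff; they are expected to come from an earlier patch in
the series. As a rough sketch only, assuming the guest's endianness is
read from MSR[LE] through vcpu->arch.shared->msr (which may not be how
the real helpers are written, and HV guests keep the MSR elsewhere),
they could look like this:

#include <linux/kvm_host.h>	/* struct kvm_vcpu */
#include <asm/reg.h>		/* MSR_LE */

/* Illustrative sketch only, not the helpers added by the series. */
static inline int kvmppc_is_bigendian(struct kvm_vcpu *vcpu)
{
	/* The guest runs big endian unless MSR[LE] is set. */
	return !(vcpu->arch.shared->msr & MSR_LE);
}

static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	/*
	 * On a big endian host, the fetched instruction needs a
	 * byte-swap exactly when the guest runs little endian.
	 */
	return !kvmppc_is_bigendian(vcpu);
}

This is also why the byte-reversed accessors (lwbrx, stwbrx, lhbrx,
sthbrx) now pass !is_bigendian instead of a hard-coded 0: they access
memory in the byte order opposite to the guest's current mode. In the
assembly exit paths, "rldicl. rX, <msr>, 0, 63" keeps only MSR[LE]
(bit 63 in IBM numbering) and sets CR0, so the beq skips the
stwbrx-based byte-reversed store when the guest is big endian.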