Add wrappers for fpr registers to prepare for supporting PAPR nested
guests.

The one_reg ioctl handlers and the MMIO emulation code currently read
and write vcpu->arch.fp directly. Convert them to the new accessors so
that a later patch can change where a nested guest's floating point
state is kept by updating the wrapper bodies alone, without touching
every caller.

Signed-off-by: Jordan Niethe <jpn@xxxxxxxxxxxxxxxxxx>
---
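For illustration, here is a sketch of how one of these wrappers could
later be filled in for a PAPR nested guest. The two nested helpers
named below are placeholders invented for this sketch, not functions
added by this series:

  static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
  {
          /*
           * Placeholders: test whether this vcpu's register state is
           * currently owned by the L0 hypervisor and, if so, fetch
           * the fpr from there before returning it.
           */
          if (kvmppc_nested_state_in_l0(vcpu))
                  return kvmppc_nested_load_fpr(vcpu, i);

          return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
  }

Callers such as kvmppc_complete_mmio_load() and the one_reg handlers
would be unchanged, which is the point of converting them to the
wrappers now.
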
 arch/powerpc/include/asm/kvm_book3s.h | 30 ++++++++++++++++++++++++++++++
 arch/powerpc/include/asm/kvm_booke.h  | 10 ++++++++++
 arch/powerpc/kvm/book3s.c             | 16 ++++++++--------
 arch/powerpc/kvm/emulate_loadstore.c  |  2 +-
 arch/powerpc/kvm/powerpc.c            | 22 +++++++++++-----------
 5 files changed, 60 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4e91f54a3f9f..a632e79639f0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -413,6 +413,36 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fp.fpscr;
+}
+
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+	vcpu->arch.fp.fpscr = val;
+}
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+	return vcpu->arch.fp.fpr[i][j];
+}
+
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 val)
+{
+	vcpu->arch.fp.fpr[i][j] = val;
+}
+
 #define BOOK3S_WRAPPER_SET(reg, size)					\
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
 {									\
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 0c3401b2e19e..7c3291aa8922 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.regs.nip;
 }
 
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
 #ifdef CONFIG_BOOKE
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index bcb335681387..c12066a12b30 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -614,17 +614,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = id - KVM_REG_PPC_FPR0;
-		*val = get_reg_val(id, VCPU_FPR(vcpu, i));
+		*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
 		break;
 	case KVM_REG_PPC_FPSCR:
-		*val = get_reg_val(id, vcpu->arch.fp.fpscr);
+		*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
 		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
 			i = id - KVM_REG_PPC_VSR0;
-			val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
-			val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+			val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+			val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
 		} else {
 			r = -ENXIO;
 		}
@@ -702,17 +702,17 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = id - KVM_REG_PPC_FPR0;
-		VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+		kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
 		break;
 	case KVM_REG_PPC_FPSCR:
-		vcpu->arch.fp.fpscr = set_reg_val(id, *val);
+		kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
 		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
 			i = id - KVM_REG_PPC_VSR0;
-			vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
-			vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+			kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+			kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
 		} else {
 			r = -ENXIO;
 		}
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index cfc9114b87d0..6458ede28b65 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -250,7 +250,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			vcpu->arch.mmio_sp64_extend = 1;
 
 		emulated = kvmppc_handle_store(vcpu,
-				VCPU_FPR(vcpu, op.reg), size, 1);
+				kvmppc_get_fpr(vcpu, op.reg), size, 1);
 
 		if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
 			kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 5fbef343aaed..9468df8d9987 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -936,7 +936,7 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 		val.vsxval[offset] = gpr;
 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
-		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
 	}
 }
 
@@ -952,8 +952,8 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 		val.vsxval[1] = gpr;
 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
-		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
-		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
 	}
 }
 
@@ -972,8 +972,8 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 	} else {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
-		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
-		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
 	}
 }
 
@@ -995,9 +995,9 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 	} else {
 		dword_offset = offset / 2;
 		word_offset = offset % 2;
-		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+		val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
 		val.vsx32val[word_offset] = gpr32;
-		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
 	}
 }
 #endif /* CONFIG_VSX */
@@ -1192,14 +1192,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
 
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif
@@ -1417,7 +1417,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			}
 
 			if (rs < 32) {
-				*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+				*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
 			} else {
 				reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 				*val = reg.vsxval[vsx_offset];
@@ -1436,7 +1436,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			if (rs < 32) {
 				dword_offset = vsx_offset / 2;
 				word_offset = vsx_offset % 2;
-				reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+				reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
 				*val = reg.vsx32val[word_offset];
 			} else {
 				reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
-- 
2.31.1