This is a first step to wrapping supervisor and user SPR saving and
loading up into helpers, which will then be called independently in
bare metal and nested HV cases in order to optimise SPR access.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
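Illustration only, not part of the diff: the sketch below shows how the
four new helpers are intended to pair up around guest entry. The
function name p9_guest_entry_sketch and the elided guest-run step are
hypothetical, for exposition; kvmhv_p9_guest_entry() in the patch below
uses this exact ordering.

	/* Hypothetical sketch -- shows the helper pairing, nothing more. */
	static int p9_guest_entry_sketch(struct kvm_vcpu *vcpu)
	{
		struct p9_host_os_sprs host_os_sprs;
		int trap = 0;

		save_p9_host_os_sprs(&host_os_sprs);	/* host OS SPRs to stack */
		load_spr_state(vcpu);			/* guest SPRs into registers */

		/* ... set up remaining entry state and run the guest here ... */

		store_spr_state(vcpu);			/* guest SPRs back to vcpu */
		restore_p9_host_os_sprs(vcpu, &host_os_sprs);	/* host SPRs back */

		return trap;
	}

Keeping the guest save/load and host save/restore as separate helpers
is what later allows the bare metal and nested HV paths to call them
independently.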
 arch/powerpc/kvm/book3s_hv.c | 148 ++++++++++++++++++++++-------------
 1 file changed, 93 insertions(+), 55 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6d39e4784af6..12c35b0561d3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3501,6 +3501,93 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	trace_kvmppc_run_core(vc, 1);
 }
 
+static void load_spr_state(struct kvm_vcpu *vcpu)
+{
+	mtspr(SPRN_DSCR, vcpu->arch.dscr);
+	mtspr(SPRN_IAMR, vcpu->arch.iamr);
+	mtspr(SPRN_PSPB, vcpu->arch.pspb);
+	mtspr(SPRN_FSCR, vcpu->arch.fscr);
+	mtspr(SPRN_TAR, vcpu->arch.tar);
+	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+	mtspr(SPRN_BESCR, vcpu->arch.bescr);
+	mtspr(SPRN_WORT, vcpu->arch.wort);
+	mtspr(SPRN_TIDR, vcpu->arch.tid);
+	mtspr(SPRN_AMR, vcpu->arch.amr);
+	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+
+	/*
+	 * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+	 * clear (or hstate set appropriately to catch those registers
+	 * being clobbered if we take a MCE or SRESET), so those are done
+	 * later.
+	 */
+
+	if (!(vcpu->arch.ctrl & 1))
+		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+}
+
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+
+	vcpu->arch.iamr = mfspr(SPRN_IAMR);
+	vcpu->arch.pspb = mfspr(SPRN_PSPB);
+	vcpu->arch.fscr = mfspr(SPRN_FSCR);
+	vcpu->arch.tar = mfspr(SPRN_TAR);
+	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+	vcpu->arch.bescr = mfspr(SPRN_BESCR);
+	vcpu->arch.wort = mfspr(SPRN_WORT);
+	vcpu->arch.tid = mfspr(SPRN_TIDR);
+	vcpu->arch.amr = mfspr(SPRN_AMR);
+	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+	vcpu->arch.dscr = mfspr(SPRN_DSCR);
+}
+
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+	unsigned long dscr;
+	unsigned long tidr;
+	unsigned long iamr;
+	unsigned long amr;
+	unsigned long fscr;
+};
+
+static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+	host_os_sprs->dscr = mfspr(SPRN_DSCR);
+	host_os_sprs->tidr = mfspr(SPRN_TIDR);
+	host_os_sprs->iamr = mfspr(SPRN_IAMR);
+	host_os_sprs->amr = mfspr(SPRN_AMR);
+	host_os_sprs->fscr = mfspr(SPRN_FSCR);
+}
+
+/* vcpu guest regs must already be saved */
+static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+				    struct p9_host_os_sprs *host_os_sprs)
+{
+	mtspr(SPRN_PSPB, 0);
+	mtspr(SPRN_WORT, 0);
+	mtspr(SPRN_UAMOR, 0);
+
+	mtspr(SPRN_DSCR, host_os_sprs->dscr);
+	mtspr(SPRN_TIDR, host_os_sprs->tidr);
+	mtspr(SPRN_IAMR, host_os_sprs->iamr);
+
+	if (host_os_sprs->amr != vcpu->arch.amr)
+		mtspr(SPRN_AMR, host_os_sprs->amr);
+
+	if (host_os_sprs->fscr != vcpu->arch.fscr)
+		mtspr(SPRN_FSCR, host_os_sprs->fscr);
+
+	/* Guest CTRL was saved in store_spr_state(); set runlatch back to 1 */
+	if (!(vcpu->arch.ctrl & 1))
+		mtspr(SPRN_CTRLT, 1);
+}
+
 static inline bool hcall_is_xics(unsigned long req)
 {
 	return req == H_EOI || req == H_CPPR || req == H_IPI ||
@@ -3515,11 +3602,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 			 unsigned long lpcr)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
-	unsigned long host_dscr = mfspr(SPRN_DSCR);
-	unsigned long host_tidr = mfspr(SPRN_TIDR);
-	unsigned long host_iamr = mfspr(SPRN_IAMR);
-	unsigned long host_amr = mfspr(SPRN_AMR);
-	unsigned long host_fscr = mfspr(SPRN_FSCR);
+	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
 	u64 tb;
 	int trap, save_pmu;
@@ -3534,6 +3617,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (local_paca->kvm_hstate.dec_expires < time_limit)
 		time_limit = local_paca->kvm_hstate.dec_expires;
 
+	save_p9_host_os_sprs(&host_os_sprs);
+
 	kvmhv_save_host_pmu();		/* saves it to PACA kvm_hstate */
 
 	kvmppc_subcore_enter_guest();
@@ -3561,28 +3646,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-	mtspr(SPRN_DSCR, vcpu->arch.dscr);
-	mtspr(SPRN_IAMR, vcpu->arch.iamr);
-	mtspr(SPRN_PSPB, vcpu->arch.pspb);
-	mtspr(SPRN_FSCR, vcpu->arch.fscr);
-	mtspr(SPRN_TAR, vcpu->arch.tar);
-	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-	mtspr(SPRN_BESCR, vcpu->arch.bescr);
-	mtspr(SPRN_WORT, vcpu->arch.wort);
-	mtspr(SPRN_TIDR, vcpu->arch.tid);
-	mtspr(SPRN_AMR, vcpu->arch.amr);
-	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
-	/*
-	 * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
-	 * clear (or hstate set appropriately to catch those registers
-	 * being clobbered if we take a MCE or SRESET), so those are done
-	 * later.
-	 */
-
-	if (!(vcpu->arch.ctrl & 1))
-		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+	load_spr_state(vcpu);
 
 	/*
 	 * When setting DEC, we must always deal with irq_work_raise via NMI vs
@@ -3678,36 +3742,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu->arch.dec_expires = dec + tb;
 	vcpu->cpu = -1;
 	vcpu->arch.thread_cpu = -1;
-	/* Save guest CTRL register, set runlatch to 1 */
-	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
-	if (!(vcpu->arch.ctrl & 1))
-		mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
-
-	vcpu->arch.iamr = mfspr(SPRN_IAMR);
-	vcpu->arch.pspb = mfspr(SPRN_PSPB);
-	vcpu->arch.fscr = mfspr(SPRN_FSCR);
-	vcpu->arch.tar = mfspr(SPRN_TAR);
-	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
-	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
-	vcpu->arch.bescr = mfspr(SPRN_BESCR);
-	vcpu->arch.wort = mfspr(SPRN_WORT);
-	vcpu->arch.tid = mfspr(SPRN_TIDR);
-	vcpu->arch.amr = mfspr(SPRN_AMR);
-	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
-	vcpu->arch.dscr = mfspr(SPRN_DSCR);
-
-	mtspr(SPRN_PSPB, 0);
-	mtspr(SPRN_WORT, 0);
-	mtspr(SPRN_UAMOR, 0);
-	mtspr(SPRN_DSCR, host_dscr);
-	mtspr(SPRN_TIDR, host_tidr);
-	mtspr(SPRN_IAMR, host_iamr);
-
-	if (host_amr != vcpu->arch.amr)
-		mtspr(SPRN_AMR, host_amr);
+
+	store_spr_state(vcpu);
 
-	if (host_fscr != vcpu->arch.fscr)
-		mtspr(SPRN_FSCR, host_fscr);
+	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
-- 
2.23.0