This reduces the number of mtmsrd instructions required to enable
facility bits when saving/restoring registers, by having the KVM code
set all bits up front rather than using individual facility functions
that set their particular MSR bits.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kernel/process.c         | 26 ++++++++++++
 arch/powerpc/kvm/book3s_hv.c          | 61 ++++++++++++++++++---------
 arch/powerpc/kvm/book3s_hv_p9_entry.c |  1 +
 3 files changed, 69 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 185beb290580..4d4e99b1756e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -593,6 +593,32 @@ static void save_all(struct task_struct *tsk)
 	msr_check_and_clear(msr_all_available);
 }
 
+void save_user_regs_kvm(void)
+{
+	unsigned long usermsr;
+
+	if (!current->thread.regs)
+		return;
+
+	usermsr = current->thread.regs->msr;
+
+	if (usermsr & MSR_FP)
+		save_fpu(current);
+
+	if (usermsr & MSR_VEC)
+		save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (usermsr & MSR_TM) {
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+}
+EXPORT_SYMBOL_GPL(save_user_regs_kvm);
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ef8c41396883..77a4138732af 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4139,6 +4139,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
 	u64 tb, next_timer;
+	unsigned long msr;
 	int trap;
 
 	WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4150,8 +4151,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (next_timer < time_limit)
 		time_limit = next_timer;
 
+	vcpu->arch.ceded = 0;
+
 	save_p9_host_os_sprs(&host_os_sprs);
 
+	/* MSR bits may have been cleared by context switch */
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
 	kvmppc_subcore_enter_guest();
 
 	vc->entry_exit_map = 1;
@@ -4160,12 +4176,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu_vpa_increment_dispatch(vcpu);
 
 	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+		msr = mfmsr(); /* TM restore can update msr */
+	}
 
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	load_vr_state(&vcpu->arch.vr);
@@ -4274,7 +4291,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	store_vr_state(&vcpu->arch.vr);
@@ -4803,6 +4819,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 		goto done;
 	}
 
+void save_user_regs_kvm(void);
+
 static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
@@ -4812,19 +4830,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	unsigned long user_tar = 0;
 	unsigned int user_vrsave;
 	struct kvm *kvm;
+	unsigned long msr;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/* No need to go into the guest when all we'll do is come back out */
+	if (signal_pending(current)) {
+		run->exit_reason = KVM_EXIT_INTR;
+		return -EINTR;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	/*
 	 * Don't allow entry with a suspended transaction, because
 	 * the guest entry/exit code will lose it.
-	 * If the guest has TM enabled, save away their TM-related SPRs
-	 * (they will get restored by the TM unavailable interrupt).
 	 */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
 	    (current->thread.regs->msr & MSR_TM)) {
 		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
@@ -4832,12 +4855,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			run->fail_entry.hardware_entry_failure_reason = 0;
 			return -EINVAL;
 		}
-		/* Enable TM so we can read the TM SPRs */
-		mtmsr(mfmsr() | MSR_TM);
-		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
-		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
-		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
-		current->thread.regs->msr &= ~MSR_TM;
 	}
 #endif
 
@@ -4852,18 +4869,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
 	kvmppc_core_prepare_to_enter(vcpu);
 
-	/* No need to go into the guest when all we'll do is come back out */
-	if (signal_pending(current)) {
-		run->exit_reason = KVM_EXIT_INTR;
-		return -EINTR;
-	}
-
 	kvm = vcpu->kvm;
 	atomic_inc(&kvm->arch.vcpus_running);
 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	flush_all_to_thread(current);
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
+	save_user_regs_kvm();
 
 	/* Save userspace EBB and other register values */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index a7f63082b4e3..fb9cb34445ea 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -224,6 +224,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		vc->tb_offset_applied = vc->tb_offset;
 	}
 
+	/* Could avoid mfmsr by passing around, but probably no big deal */
 	msr = mfmsr();
 
 	host_hfscr = mfspr(SPRN_HFSCR);
-- 
2.23.0
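
[Editor's note, not part of the patch: a stand-alone user-space sketch of the
idea described in the changelog, for illustration only. The MSR bit values,
the simulated register, the mtmsrd counter and the function names
per_facility_enables()/up_front_enable() are invented here; the local
msr_check_and_set() is only a stand-in modelled on the kernel helper's
"write the MSR only when bits are missing" behaviour. It shows why building
the union of required facility bits and enabling them with one call costs
fewer MSR writes than letting each facility path enable its own bits.]

#include <stdio.h>

/* Invented facility bits and a simulated MSR, for illustration only. */
#define MSR_FP  0x1UL
#define MSR_VEC 0x2UL
#define MSR_VSX 0x4UL
#define MSR_TM  0x8UL

static unsigned long simulated_msr;
static int mtmsrd_count;

/* Stand-in for msr_check_and_set(): write the MSR only if bits are missing. */
static unsigned long msr_check_and_set(unsigned long bits)
{
	if ((simulated_msr | bits) != simulated_msr) {
		simulated_msr |= bits;
		mtmsrd_count++;	/* one simulated mtmsrd per actual MSR update */
	}
	return simulated_msr;
}

/* Old pattern: each save/restore path enables its own facility bits. */
static void per_facility_enables(void)
{
	msr_check_and_set(MSR_TM);			/* TM restore */
	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);	/* FP/VEC/VSX load */
	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);	/* FP/VEC/VSX store */
}

/* New pattern: the caller sets the union of required bits up front. */
static void up_front_enable(void)
{
	unsigned long msr = MSR_FP | MSR_VEC | MSR_VSX | MSR_TM;

	msr = msr_check_and_set(msr);
	/* ... TM restore, load_fp_state(), store_fp_state() run here ... */
}

int main(void)
{
	simulated_msr = 0;
	mtmsrd_count = 0;
	per_facility_enables();
	printf("per-facility enables: %d simulated mtmsrd\n", mtmsrd_count);

	simulated_msr = 0;
	mtmsrd_count = 0;
	up_front_enable();
	printf("up-front enable:      %d simulated mtmsrd\n", mtmsrd_count);
	return 0;
}

[In the real entry path the saving is larger than this toy count suggests,
since the removed mtmsr(mfmsr() | MSR_TM) and the facility toggling inside
flush_all_to_thread()/save_all() also went away, as the hunks above show.]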