Moving the mtmsrd after the host SPRs are saved and before the guest
SPRs start to be loaded can prevent an SPR scoreboard stall (because
the mtmsrd is L=1 type, which does not cause context synchronisation).

This is also now more convenient to combine with the mtmsrd L=0
instruction that enables facilities just below, but that is not done
yet.

-12 cycles (7791) POWER9 virt-mode NULL hcall

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_hv.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3ac5dbdb59f8..b8b0695a9312 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4015,6 +4015,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	save_p9_host_os_sprs(&host_os_sprs);
 
+	/*
+	 * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover unrecoverable
+	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
+	 *
+	 * Much more difficult and less worthwhile to combine with IR/DR
+	 * disable.
+	 */
+	hard_irq_disable();
+	if (lazy_irq_pending())
+		return 0;
+
 	/* MSR bits may have been cleared by context switch */
 	msr = 0;
 	if (IS_ENABLED(CONFIG_PPC_FPU))
@@ -4512,6 +4524,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct kvmppc_vcore *vc;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
+	unsigned long flags;
 
 	trace_kvmppc_run_vcpu_enter(vcpu);
 
@@ -4555,11 +4568,11 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (kvm_is_radix(kvm))
 		kvmppc_prepare_radix_vcpu(vcpu, pcpu);
 
-	local_irq_disable();
-	hard_irq_disable();
+	/* flags save not required, but irq_pmu has no disable/enable API */
+	powerpc_local_irq_pmu_save(flags);
 	if (signal_pending(current))
 		goto sigpend;
-	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+	if (need_resched() || !kvm->arch.mmu_ready)
 		goto out;
 
 	if (!nested) {
@@ -4614,7 +4627,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	guest_exit_irqoff();
 
-	local_irq_enable();
+	powerpc_local_irq_pmu_restore(flags);
 
 	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
 
@@ -4672,7 +4685,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	run->exit_reason = KVM_EXIT_INTR;
 	vcpu->arch.ret = -EINTR;
  out:
-	local_irq_enable();
+	powerpc_local_irq_pmu_restore(flags);
 	preempt_enable();
 	goto done;
 }
-- 
2.23.0
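
For illustration only, not part of this patch: a rough sketch of what
the "combine with the mtmsrd L=0 instruction to enable facilities"
idea mentioned in the changelog might look like. The helper name is
made up, the TM case is omitted, and it assumes the caller has already
soft-disabled local interrupts (as kvmhv_run_single_vcpu() does via
powerpc_local_irq_pmu_save() above).

/*
 * Hypothetical sketch: fold the facility enable into the same MSR write
 * that hard-disables interrupts, so one mtmsrd L=0 (context-synchronising)
 * replaces the separate hard_irq_disable() and msr_check_and_set() writes.
 */
static unsigned long p9_hard_disable_set_facilities(unsigned long msr)
{
	unsigned long msr_needed = 0;

	msr &= ~MSR_EE;

	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_needed |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_needed |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_needed |= MSR_VSX;

	if ((msr & msr_needed) != msr_needed) {
		/* L=0 so the new facility bits take effect */
		msr |= msr_needed;
		__mtmsrd(msr, 0);
	} else {
		/* nothing to enable, the cheaper L=1 write is enough */
		__hard_irq_disable();
	}
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	return msr;
}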