This additionally has to save and restore the host SLB, and also ensure
that the MMU is off while switching into the guest SLB.

P9 and later CPUs now always go via the P9 path.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_64_entry.S     |  6 +++++
 arch/powerpc/kvm/book3s_hv.c           |  4 ++-
 arch/powerpc/kvm/book3s_hv_interrupt.c | 35 ++++++++++++++++++++++----
 3 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
index 8cb1322cfe00..8d5b92f258ea 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -313,6 +313,12 @@ kvmppc_p9_exit_interrupt:
  * effort for a small bit of code. Lots of other things to do first.
  */
 kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+	/*
+	 * Hash host doesn't try to recover MMU (requires host SLB reload)
+	 */
+	b	.
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 	/*
 	 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
 	 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 588ac794a90b..db3f7e0bf832 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4436,7 +4436,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
-		if (radix_enabled())
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
 		else
@@ -5516,6 +5516,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 		return -EPERM;
 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 		return -ENODEV;
+	if (!radix_enabled())
+		return -ENODEV;
 
 	/* kvm == NULL means the caller is testing if the capability exists */
 	if (kvm)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupt.c b/arch/powerpc/kvm/book3s_hv_interrupt.c
index aba3641bae4f..d29ca797cb50 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupt.c
+++ b/arch/powerpc/kvm/book3s_hv_interrupt.c
@@ -143,12 +143,29 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 }
 
-static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
 	mtspr(SPRN_PID, pid);
 	mtspr(SPRN_LPID, kvm->arch.host_lpid);
 	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
 	isync();
+
+	if (!radix_enabled())
+		slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+	if (!radix_enabled()) {
+		/*
+		 * Hash host could save and restore host SLB entries to
+		 * reduce SLB fault overheads of VM exits, but for now the
+		 * existing code clears all entries and restores just the
+		 * bolted ones when switching back to host.
+		 */
+		mtslb(0, 0, 0);
+		slb_invalidate(6);
+	}
 }
 
 static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
@@ -283,15 +300,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_AMOR, ~0UL);
 
 	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST_HV_FAST;
+
+	/*
+	 * Hash host, hash guest, or radix guest with prefetch bug, all have
+	 * to disable the MMU before switching to guest MMU state.
+	 */
+	if (!radix_enabled() || !kvm_is_radix(kvm) ||
+	    cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+	save_clear_host_mmu(kvm);
+
 	if (kvm_is_radix(kvm)) {
-		if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-			__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
 		switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 		if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
 			__mtmsrd(0, 1); /* clear RI */
 	} else {
-		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
 		switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
 	}
 
@@ -477,7 +502,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_HDEC, decrementer_max);
 
 	save_clear_guest_mmu(kvm, vcpu);
-	switch_mmu_to_host_radix(kvm, host_pidr);
+	switch_mmu_to_host(kvm, host_pidr);
 	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
 
 	/*
-- 
2.23.0
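
As a reading aid (not part of the patch): the ordering the hunks above establish can be hard to see through the diff context, so below is a minimal, self-contained C sketch of how I read the entry/exit flow. Every name in it (mmu_off, clear_host_slb, and so on, plus the boolean flags) is a stand-in for the kernel helpers touched in the diff (__mtmsrd, save_clear_host_mmu, switch_mmu_to_guest_radix/hpt, switch_mmu_to_host, slb_restore_bolted_realmode); the stubs only print the step they model, and details such as the later RI clearing on the radix path are deliberately left out.

/*
 * Illustrative sketch only: stand-alone stubs modelling the MMU switch
 * ordering of kvmhv_vcpu_entry_p9() after this patch. Names are stand-ins,
 * not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool radix_host;		/* models radix_enabled() */
static bool radix_guest;	/* models kvm_is_radix(kvm) */
static bool prefetch_bug;	/* models CPU_FTR_P9_RADIX_PREFETCH_BUG */

static void mmu_off(void)            { puts("MSR[IR|DR|RI] <- 0"); }
static void clear_host_slb(void)     { puts("clear host SLB (hash host)"); }
static void switch_to_guest(void)    { puts("guest LPID/LPCR/PID (+ guest SLB for HPT)"); }
static void switch_to_host(void)     { puts("host LPID/LPCR/PID"); }
static void restore_bolted_slb(void) { puts("restore bolted host SLB (hash host)"); }

static void enter_guest(void)
{
	/*
	 * Hash host, hash guest, or a radix guest on a CPU with the
	 * prefetch bug must switch with translation (and RI) off.
	 */
	if (!radix_host || !radix_guest || prefetch_bug)
		mmu_off();

	if (!radix_host)
		clear_host_slb();	/* models save_clear_host_mmu() */

	switch_to_guest();		/* models switch_mmu_to_guest_{radix,hpt}() */
}

static void exit_guest(void)
{
	switch_to_host();		/* models switch_mmu_to_host() */
	if (!radix_host)
		restore_bolted_slb();	/* bolted entries only, per the comment above */
}

int main(void)
{
	radix_host = false;		/* e.g. hash host running a hash guest */
	radix_guest = false;
	enter_guest();
	exit_guest();
	return 0;
}

With radix_host and radix_guest both true and no prefetch bug, the sketch skips mmu_off() entirely, which matches the existing radix-on-radix fast path; the hash cases are the ones that widen the translation-off window to cover the host SLB clear and the guest SLB load.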