This moves the PMU switch to the guest as late as possible on entry, and
the switch back to the host as early as possible on exit. This helps the
host get as much perf coverage of the KVM entry/exit code as possible.

This is slightly suboptimal from an SPR scheduling point of view when the
PMU is enabled, but when perf is disabled there is no real difference.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_hv.c          | 6 ++----
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 6 ++----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 26872a4993fd..c76deb3de3e9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3823,8 +3823,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	s64 dec;
 	int trap;
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
-
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -3887,9 +3885,11 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
 				  __pa(&vcpu->arch.regs));
 	kvmhv_restore_hv_return_state(vcpu, &hvregs);
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
 	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
 	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
@@ -3908,8 +3908,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	return trap;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index e52d8b040970..48cc94f3d642 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -597,8 +597,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
 	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
-
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -740,7 +738,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	accumulate_time(vcpu, &vcpu->arch.guest_time);
 
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 	kvmppc_p9_enter_guest(vcpu);
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 
 	accumulate_time(vcpu, &vcpu->arch.rm_intr);
 
@@ -951,8 +951,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	asm volatile(PPC_CP_ABORT);
 
  out:
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	end_timing(vcpu);
 
 	return trap;
-- 
2.23.0
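
[Note, not part of the patch] A minimal sketch of the resulting ordering,
assuming only the helper and type names visible in the diff context
(save_p9_host_os_sprs, switch_pmu_to_guest/host, kvmppc_p9_enter_guest,
struct p9_host_os_sprs); the function name sketch_pmu_bracketing() is
hypothetical, and the real kvmhv_vcpu_entry_p9() does much more between
these steps:

/*
 * Illustrative sketch only (hypothetical function name); simplified from
 * the patched kvmhv_vcpu_entry_p9() flow, with SPR setup, timing and
 * interrupt handling elided.
 */
static void sketch_pmu_bracketing(struct kvm_vcpu *vcpu)
{
	struct p9_host_os_sprs host_os_sprs;

	/* Host SPRs are now saved while the host PMU is still active. */
	save_p9_host_os_sprs(&host_os_sprs);

	/* ... guest SPR/LPCR setup elided ... */

	switch_pmu_to_guest(vcpu, &host_os_sprs);	/* as late as possible */
	kvmppc_p9_enter_guest(vcpu);			/* actual guest entry/exit */
	switch_pmu_to_host(vcpu, &host_os_sprs);	/* as early as possible */

	/* ... interrupt handling elided ... */

	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
}

The effect is that host perf samples taken in the elided setup and
teardown regions are attributed to the host PMU, since the PMU is only
handed to the guest immediately around the actual entry.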