This juggles SPR switching on the entry and exit sides to be more
symmetric, which makes the next refactoring patch possible with no
functional change.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_hv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 85f441d9ce63..7867d6793b3e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4211,7 +4211,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		msr = mfmsr(); /* TM restore can update msr */
 	}
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	load_spr_state(vcpu, &host_os_sprs);
 
 	load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4219,7 +4219,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-	load_spr_state(vcpu, &host_os_sprs);
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
 	if (kvmhv_on_pseries()) {
 		/*
@@ -4319,6 +4319,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		vcpu->arch.slb_max = 0;
 	}
 
+	switch_pmu_to_host(vcpu, &host_os_sprs);
+
 	store_spr_state(vcpu);
 
 	store_fp_state(&vcpu->arch.fp);
@@ -4333,8 +4335,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	vcpu_vpa_increment_dispatch(vcpu);
 
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	timer_rearm_host_dec(*tb);
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-- 
2.23.0
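
For readers following the series, a rough sketch of the ordering in
kvmhv_p9_guest_entry() after this patch, reduced to only the calls the
diff touches. This is an illustrative summary, not the literal kernel
code: the TM restore, CONFIG_ALTIVEC guards, the pseries path, and all
other intervening work are elided.

	/* entry side: guest SPRs first, PMU switched last, nearest the guest run */
	load_spr_state(vcpu, &host_os_sprs);
	load_fp_state(&vcpu->arch.fp);
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
	switch_pmu_to_guest(vcpu, &host_os_sprs);

	/* ... guest runs ... */

	/* exit side: PMU switched first, nearest the guest exit, then stores */
	switch_pmu_to_host(vcpu, &host_os_sprs);
	store_spr_state(vcpu);
	store_fp_state(&vcpu->arch.fp);
	timer_rearm_host_dec(*tb);
	restore_p9_host_os_sprs(vcpu, &host_os_sprs);

The apparent effect is that the PMU switch now brackets the guest run
symmetrically on both sides (last before entry, first after exit),
which is what lets a later patch factor the two paths together.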