This juggles SPR switching on the entry and exit sides to be more
symmetric, which makes the next refactoring patch possible with no
functional change.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_hv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 460290cc79af..e817159cd53f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4209,7 +4209,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		msr = mfmsr(); /* TM restore can update msr */
 	}
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	load_spr_state(vcpu, &host_os_sprs);
 
 	load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4217,7 +4217,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-	load_spr_state(vcpu, &host_os_sprs);
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
 	if (kvmhv_on_pseries()) {
 		/*
@@ -4317,6 +4317,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		vcpu->arch.slb_max = 0;
 	}
 
+	switch_pmu_to_host(vcpu, &host_os_sprs);
+
 	store_spr_state(vcpu);
 
 	store_fp_state(&vcpu->arch.fp);
@@ -4331,8 +4333,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	vcpu_vpa_increment_dispatch(vcpu);
 
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	timer_rearm_host_dec(*tb);
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-- 
2.23.0
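
For context, the net effect of the patch is that the PMU switch becomes the
innermost operation on both sides of guest execution: it is now the last
guest-state load on entry and the first host-state restore on exit, mirroring
each other. A minimal sketch of the resulting ordering, assuming the helper
names from the diff above (simplified pseudocode, not the actual function
body; the real kvmhv_p9_guest_entry() also handles TM, VRSAVE, pseries, and
VPA details in between):

	/*
	 * Sketch only: illustrates the symmetric nesting after this
	 * patch, with the unrelated steps of the real function omitted.
	 */

	/* Entry side: guest SPRs and FP state first, PMU switch last. */
	load_spr_state(vcpu, &host_os_sprs);
	load_fp_state(&vcpu->arch.fp);
	switch_pmu_to_guest(vcpu, &host_os_sprs);	/* innermost */

	/* ... guest runs here ... */

	/* Exit side: mirror image, PMU switch first. */
	switch_pmu_to_host(vcpu, &host_os_sprs);	/* innermost */
	store_spr_state(vcpu);
	store_fp_state(&vcpu->arch.fp);
	restore_p9_host_os_sprs(vcpu, &host_os_sprs);

With entry and exit now exact mirrors of each other, the paired load/store
and switch calls can be hoisted or factored together by a follow-up patch
without changing the order in which any SPR is touched.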