From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

Move the call to svm_exit_handlers_fastpath() after
svm_complete_interrupts(), since svm_complete_interrupts() consumes rip,
and re-enable the handle_fastpath_set_msr_irqoff() call in
svm_exit_handlers_fastpath().

Suggested-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Reviewed-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Cc: Paul K. <kronenpj@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dafc14d..b3e3429 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3347,6 +3347,10 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 
 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
+	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+	    to_svm(vcpu)->vmcb->control.exit_info_1)
+		return handle_fastpath_set_msr_irqoff(vcpu);
+
 	return EXIT_FASTPATH_NONE;
 }
 
@@ -3495,7 +3499,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	stgi();
 
 	/* Any pending NMI will happen here */
-	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_after_interrupt(&svm->vcpu);
@@ -3530,6 +3533,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm_complete_interrupts(svm);
 
 	vmcb_mark_all_clean(svm->vmcb);
+
+	if (is_guest_mode(vcpu))
+		return EXIT_FASTPATH_NONE;
+
+	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
+
 	return exit_fastpath;
 }
-- 
2.7.4
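
For readers following along, below is a minimal, self-contained sketch of
the ordering hazard the patch fixes. This is plain userspace C, not kernel
code; struct vcpu_state, complete_interrupts(), fastpath_wrmsr() and all
other names here are hypothetical stand-ins. The point it illustrates is
the one from the changelog: a completion routine that consumes the
exit-time rip must run before a fastpath handler that may advance rip
(e.g. to skip the emulated WRMSR).

/*
 * Sketch only (hypothetical names): why the fastpath must run after
 * the interrupt-completion step that consumes the exit-time rip.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	unsigned long rip;          /* guest rip saved at VM-exit */
	unsigned long pending_rip;  /* where re-injection expects to resume */
	bool needs_reinject;        /* an event was cut short by the exit */
};

/* Stand-in for svm_complete_interrupts(): consumes the exit-time rip. */
static void complete_interrupts(struct vcpu_state *v)
{
	if (v->needs_reinject)
		v->pending_rip = v->rip;  /* must see the *unmodified* rip */
}

/* Stand-in for the WRMSR fastpath: skips the 2-byte WRMSR on success. */
static void fastpath_wrmsr(struct vcpu_state *v)
{
	v->rip += 2;
}

int main(void)
{
	/* Old (broken) order: fastpath first, completion sees a moved rip. */
	struct vcpu_state a = { .rip = 0x1000, .needs_reinject = true };
	fastpath_wrmsr(&a);
	complete_interrupts(&a);
	printf("fastpath first  : pending_rip = 0x%lx (corrupted)\n",
	       a.pending_rip);

	/* Patched order: completion consumes rip, then the fastpath runs. */
	struct vcpu_state b = { .rip = 0x1000, .needs_reinject = true };
	complete_interrupts(&b);
	fastpath_wrmsr(&b);
	printf("completion first: pending_rip = 0x%lx (correct)\n",
	       b.pending_rip);

	assert(b.pending_rip == 0x1000);
	return 0;
}

Note also that the patched svm_vcpu_run() bails out with
EXIT_FASTPATH_NONE when is_guest_mode(vcpu) is true, so nested exits stay
on the slow path and never reach the fastpath dispatch at all.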