On Fri, 24 Apr 2020 at 14:23, Wanpeng Li <kernellwp@xxxxxxxxx> wrote:
>
> From: Wanpeng Li <wanpengli@xxxxxxxxxxx>
>
> Introduce need_cancel_enter_guest() helper, we need to check some
> conditions before doing CONT_RUN, in addition, it can also catch
> the case vmexit occurred while another event was being delivered
> to guest software since vmx_complete_interrupts() adds the request
> bit.
>
> Tested-by: Haiwei Li <lihaiwei@xxxxxxxxxxx>
> Cc: Haiwei Li <lihaiwei@xxxxxxxxxxx>
> Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
>  arch/x86/kvm/x86.c     | 10 ++++++++--
>  arch/x86/kvm/x86.h     |  1 +
>  3 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index f1f6638..5c21027 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
>
>  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  {
> -	enum exit_fastpath_completion exit_fastpath;
> +	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	unsigned long cr3, cr4;
>
> @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	vmx_recover_nmi_blocking(vmx);
>  	vmx_complete_interrupts(vmx);
>
> -	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> -	/* static call is better with retpolines */
> -	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> -		goto cont_run;
> +	if (!kvm_need_cancel_enter_guest(vcpu)) {
> +		exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> +		/* static call is better with retpolines */
> +		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> +			goto cont_run;
> +	}

The kvm_need_cancel_enter_guest() check should not come before
vmx_exit_handlers_fastpath(), since that would break the IPI fastpath.
How about applying something like the diff below? Otherwise, we could
introduce another EXIT_FASTPATH_CONT_FAIL completion value to indicate a
failure caused by kvm_need_cancel_enter_guest() when it is checked after
vmx_exit_handlers_fastpath(); vmx_handle_exit() would then return 1
directly instead of calling kvm_skip_emulated_instruction(). A
VMX-preemption timer exit doesn't need to skip the emulated instruction,
but a WRMSR TSC_DEADLINE MSR exit does, which makes this a little more
complex. Paolo, what do you think?
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 853d3af..9317924 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6564,6 +6564,9 @@ static enum exit_fastpath_completion handle_fastpath_preemption_timer(struct kvm
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	if (kvm_need_cancel_enter_guest(vcpu))
+		return EXIT_FASTPATH_NONE;
+
 	if (!vmx->req_immediate_exit &&
 	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
 		kvm_lapic_expired_hv_timer(vcpu);
@@ -6771,12 +6774,10 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
-	if (!(kvm_need_cancel_enter_guest(vcpu))) {
-		exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
-			vmx_sync_pir_to_irr(vcpu);
-			goto cont_run;
-		}
+	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
+		vmx_sync_pir_to_irr(vcpu);
+		goto cont_run;
 	}
 
 	return exit_fastpath;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 99061ba..11b309c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1618,6 +1618,9 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
 {
+	if (kvm_need_cancel_enter_guest(vcpu))
+		return 1;
+
 	if (!kvm_x86_ops.set_hv_timer ||
 	    kvm_mwait_in_guest(vcpu->kvm) ||
 	    kvm_can_post_timer_interrupt(vcpu))
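
For illustration only, here is a rough, untested sketch of the
EXIT_FASTPATH_CONT_FAIL alternative mentioned above; the new enum value,
its placement, and the exact form of the vmx_handle_exit() check are
hypothetical and not part of either patch:

	/* in vmx_vcpu_run(), after vmx_complete_interrupts(): */
	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN &&
	    kvm_need_cancel_enter_guest(vcpu))
		exit_fastpath = EXIT_FASTPATH_CONT_FAIL;

	/* in vmx_handle_exit(): the fastpath work is already done, so go
	 * back to the outer loop without skipping the instruction again.
	 */
	if (exit_fastpath == EXIT_FASTPATH_CONT_FAIL)
		return 1;

The idea is that the fastpath handler keeps running first (so the IPI
fastpath is unaffected), and only the decision to stay in the inner loop
is cancelled when kvm_need_cancel_enter_guest() is true.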