From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

This patch implements a preemption timer fastpath: when a VM-exit occurs
because the VMX-preemption timer has counted down to zero, handle it as
soon as possible and re-enter the guest immediately, without going
through the various KVM checks when possible.

Tested on an SKX server.

cyclictest in guest (w/o mwait exposed, adaptive advance lapic timer is
default -1):

    5632.75ns -> 4559.25ns, 19%

kvm-unit-test/vmexit.flat:

w/o APICv, w/o advance timer:
    tscdeadline_immed: 4780.75 -> 3851      19.4%
    tscdeadline:       7474    -> 6528.5    12.7%

w/o APICv, w/ adaptive advance timer default -1:
    tscdeadline_immed: 4845.75 -> 3930.5    18.9%
    tscdeadline:       6048    -> 5871.75    3%

w/ APICv, w/o advance timer:
    tscdeadline_immed: 2919    -> 2467.75   15.5%
    tscdeadline:       5661.75 -> 5188.25    8.4%

w/ APICv, w/ adaptive advance timer default -1:
    tscdeadline_immed: 3018.5  -> 2561      15.2%
    tscdeadline:       4663.75 -> 4537       2.7%

Tested-by: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Cc: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
 arch/x86/kvm/lapic.c   | 19 +++++++++++++++++++
 arch/x86/kvm/lapic.h   |  1 +
 arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++++
 3 files changed, 42 insertions(+)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d652bd9..2741931 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1899,6 +1899,25 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
 
 static void kvm_inject_apic_timer_irqs_fast(struct kvm_vcpu *vcpu);
+bool kvm_lapic_expired_hv_timer_fast(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	struct kvm_timer *ktimer = &apic->lapic_timer;
+
+	if (!apic_lvtt_tscdeadline(apic) ||
+	    !ktimer->hv_timer_in_use ||
+	    atomic_read(&ktimer->pending))
+		return false;
+
+	WARN_ON(swait_active(&vcpu->wq));
+	cancel_hv_timer(apic);
+
+	ktimer->expired_tscdeadline = ktimer->tscdeadline;
+	kvm_inject_apic_timer_irqs_fast(vcpu);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer_fast);
 
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 5ef1364..1b5abd8 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -252,6 +252,7 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
 bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu);
 int kvm_set_lapic_tscdeadline_msr_fast(struct kvm_vcpu *vcpu, u64 data);
+bool kvm_lapic_expired_hv_timer_fast(struct kvm_vcpu *vcpu);
 
 static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
 {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2613e58..527d1c1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6569,12 +6569,34 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
+static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
+
+static enum exit_fastpath_completion handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (kvm_need_cancel_enter_guest(vcpu) ||
+	    kvm_event_needs_reinjection(vcpu))
+		return EXIT_FASTPATH_NONE;
+
+	if (!vmx->req_immediate_exit &&
+	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled) &&
+	    kvm_lapic_expired_hv_timer_fast(vcpu)) {
+		trace_kvm_exit(EXIT_REASON_PREEMPTION_TIMER, vcpu, KVM_ISA_VMX);
+		return EXIT_FASTPATH_CONT_RUN;
+	}
+
+	return EXIT_FASTPATH_NONE;
+}
+
 static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
 	if (!is_guest_mode(vcpu)) {
 		switch (to_vmx(vcpu)->exit_reason) {
 		case EXIT_REASON_MSR_WRITE:
 			return handle_fastpath_set_msr_irqoff(vcpu);
+		case EXIT_REASON_PREEMPTION_TIMER:
+			return handle_fastpath_preemption_timer(vcpu);
 		default:
 			return EXIT_FASTPATH_NONE;
 		}
--
2.7.4
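
[Editor's sketch, for readers following along outside the series: below is a
minimal, self-contained model of the control flow this fastpath enables. The
EXIT_FASTPATH_CONT_RUN handling in vcpu_enter_guest() comes from an earlier
patch in this series and is not shown in the diff above; the exit-reason
numbers match the VMX spec, but the helper names and the toy loop are
illustrative stand-ins, not kernel code.]

/*
 * Toy model of the fastpath dispatch: a preemption-timer VM-exit is
 * handled inline and the "vcpu" re-enters the guest immediately,
 * skipping the full exit handler and the outer-loop request checks.
 */
#include <stdio.h>

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,	/* fall through to the full exit handler */
	EXIT_FASTPATH_CONT_RUN,	/* exit fully handled; re-enter guest now */
};

#define EXIT_REASON_MSR_WRITE		32
#define EXIT_REASON_PREEMPTION_TIMER	52

/* Stand-in for vmx_exit_handlers_fastpath(): on a preemption-timer
 * VM-exit, "inject" the expired LAPIC timer interrupt on the spot. */
static enum exit_fastpath_completion fastpath(int exit_reason)
{
	switch (exit_reason) {
	case EXIT_REASON_PREEMPTION_TIMER:
		printf("timer fired: inject LAPIC timer irq inline\n");
		return EXIT_FASTPATH_CONT_RUN;
	default:
		return EXIT_FASTPATH_NONE;
	}
}

int main(void)
{
	int exits[] = { EXIT_REASON_PREEMPTION_TIMER, EXIT_REASON_MSR_WRITE };

	for (int i = 0; i < 2; i++) {
		/* After each VM-exit, try the fastpath first; CONT_RUN
		 * loops straight back into the guest. */
		if (fastpath(exits[i]) == EXIT_FASTPATH_CONT_RUN)
			continue;
		printf("exit %d: full handler + outer-loop checks\n",
		       exits[i]);
	}
	return 0;
}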