Merge pv_eoi_get_pending() and pv_eoi_clr_pending() into a single function,
pv_eoi_test_and_clr_pending(), which returns and clears the value of the
pending bit.

Clear the PV EOI pending bit only when it is set, so the call to
pv_eoi_put_user() is skipped when the bit is already clear; this saves
about 300 nsec on AMD EPYC most of the time.

Also make pv_eoi_set_pending() inline, since it has only one user.

Suggested-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Li RongQing <lirongqing@xxxxxxxxx>
---
Changes from v1:
  - merge the two helpers as pv_eoi_test_and_clr_pending()
  - make pv_eoi_set_pending() inline

 arch/x86/kvm/lapic.c | 47 +++++++++++++++++++++++------------------------
 1 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 76fb009..4da5db8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -673,18 +673,7 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 }
 
-static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
-{
-	u8 val;
-	if (pv_eoi_get_user(vcpu, &val) < 0) {
-		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
-			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
-		return false;
-	}
-	return val & KVM_PV_EOI_ENABLED;
-}
-
-static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
+static inline void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
 {
 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
 		printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
@@ -694,14 +683,31 @@ static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
 	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 }
 
-static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
+static inline bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
 {
-	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
+	u8 val;
+
+	if (pv_eoi_get_user(vcpu, &val) < 0) {
+		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
+			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
+		return false;
+	}
+
+	val &= KVM_PV_EOI_ENABLED;
+
+	/*
+	 * Clear pending bit in any case: it will be set again on vmentry.
+	 * While this might not be ideal from performance point of view,
+	 * this makes sure pv eoi is only enabled when we know it's safe.
+	 */
+	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
 		printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
-		return;
+		return false;
 	}
 	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+
+	return !!val;
 }
 
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
@@ -2673,7 +2679,6 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
 					struct kvm_lapic *apic)
 {
-	bool pending;
 	int vector;
 	/*
 	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
@@ -2687,14 +2692,8 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
 	 * -> host enabled PV EOI, guest executed EOI.
 	 */
 	BUG_ON(!pv_eoi_enabled(vcpu));
-	pending = pv_eoi_get_pending(vcpu);
-	/*
-	 * Clear pending bit in any case: it will be set again on vmentry.
-	 * While this might not be ideal from performance point of view,
-	 * this makes sure pv eoi is only enabled when we know it's safe.
-	 */
-	pv_eoi_clr_pending(vcpu);
-	if (pending)
+
+	if (pv_eoi_test_and_clr_pending(vcpu))
 		return;
 	vector = apic_set_eoi(apic);
 	trace_kvm_pv_eoi(apic, vector);
-- 
1.7.1
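
For illustration only, here is a minimal, self-contained userspace sketch
(hypothetical names, not KVM code) of the test-and-clear pattern the patch
relies on: read the flag once and pay for the write-back only when the flag
was actually set, so the common already-clear case skips the expensive
pv_eoi_put_user()-style write entirely.

/*
 * Standalone sketch of test-and-clear with a conditional write.
 * shared_flag stands in for the guest-shared PV EOI byte; the
 * "expensive" write models the guest-memory access that costs
 * roughly 300 nsec in the KVM case.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_ENABLED 0x1

/* Hypothetical stand-in for the guest-shared byte. */
static unsigned char shared_flag = FLAG_ENABLED;

/* Expensive write path, analogous to pv_eoi_put_user(). */
static void expensive_clear(void)
{
	shared_flag = 0;
	puts("paid for a write");
}

/*
 * Return the previous value of the flag and leave it cleared,
 * performing the write only when the flag was actually set.
 */
static bool test_and_clear_flag(void)
{
	bool was_set = shared_flag & FLAG_ENABLED;

	if (was_set)
		expensive_clear();

	return was_set;
}

int main(void)
{
	printf("first call:  %d\n", test_and_clear_flag()); /* 1, does the write */
	printf("second call: %d\n", test_and_clear_flag()); /* 0, skips the write */
	return 0;
}

Built with gcc, the first call reports that the write was performed and the
second call skips it, which is the case the patch optimizes.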