Commit 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU
states") fixed KVM to also latch a pending LAPIC INIT event when the vCPU
is in VMX operation.

However, the current KVM_SET_MP_STATE API allows userspace to put the
vCPU into KVM_MP_STATE_SIPI_RECEIVED or KVM_MP_STATE_INIT_RECEIVED even
when the vCPU is in VMX operation.

Fix this by introducing a utility method to check whether the vCPU state
latches INIT signals, and use it in the KVM_SET_MP_STATE handler.

Fixes: 4b9852f4f389 ("KVM: x86: Fix INIT signal handling in various CPU states")
Reported-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Reviewed-by: Mihai Carabas <mihai.carabas@xxxxxxxxxx>
Signed-off-by: Liran Alon <liran.alon@xxxxxxxxxx>
---
 arch/x86/kvm/lapic.c | 5 ++---
 arch/x86/kvm/x86.c   | 4 ++--
 arch/x86/kvm/x86.h   | 5 +++++
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b29d00b661ff..0df265248cae 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2702,14 +2702,13 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * INITs are latched while CPU is in specific states
-	 * (SMM, VMX non-root mode, SVM with GIF=0).
+	 * INITs are latched while CPU is in specific states.
 	 * Because a CPU cannot be in these states immediately
 	 * after it has processed an INIT signal (and thus in
 	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
 	 * and leave the INIT pending.
 	 */
-	if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
+	if (kvm_vcpu_latch_init(vcpu)) {
 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
 		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
 			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f41d5d05e9f2..eb992f5d299f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8675,8 +8675,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		goto out;
 
-	/* INITs are latched while in SMM */
-	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+	/* INITs are latched while CPU is in specific states */
+	if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
 	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
 	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
 		goto out;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index dbf7442a822b..d40da892f889 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -260,6 +260,11 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 	return !(kvm->arch.disabled_quirks & quirk);
 }
 
+static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
+{
+	return is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu);
+}
+
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
-- 
2.20.1
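
For reference, below is a minimal userspace sketch (not part of the patch)
of the KVM_SET_MP_STATE path that the new check guards. It only uses the
standard KVM UAPI from <linux/kvm.h>; error handling is omitted for
brevity. With this patch applied, the ioctl is expected to fail with
-EINVAL when the target vCPU is in a state that latches INITs (SMM, VMX
non-root mode, SVM with GIF=0); a freshly created vCPU as below is not in
such a state, so the call simply succeeds.

#include <errno.h>
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Ask KVM to put the vCPU into the wait-for-SIPI state. */
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_INIT_RECEIVED };

	if (ioctl(vcpu, KVM_SET_MP_STATE, &mp) < 0)
		printf("KVM_SET_MP_STATE rejected: %s\n", strerror(errno));
	else
		printf("KVM_SET_MP_STATE accepted\n");

	return 0;
}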