From: Santosh Shukla <santosh.shukla@xxxxxxx>

In the vNMI case, report that NMI injection is not allowed when
V_NMI_PENDING is set, which means a virtual NMI is already pending for
the guest to process while the guest is busy handling the current
virtual NMI. The guest will first finish handling the current virtual
NMI and then take the pending one without a VM exit.

Maxim:
   - disable the NMI window unconditionally for now.

Signed-off-by: Santosh Shukla <santosh.shukla@xxxxxxx>
Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c |  6 ++++++
 arch/x86/kvm/svm/svm.h | 11 +++++++++++
 2 files changed, 17 insertions(+)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c16f68f6c4f7d7..cfec4c98bb589b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3595,6 +3595,9 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
 		return false;
 
+	if (is_vnmi_enabled(svm) && is_vnmi_pending_set(svm))
+		return true;
+
 	ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
 	      (vcpu->arch.hflags & HF_NMI_MASK);
 
@@ -3732,6 +3735,9 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (is_vnmi_enabled(svm))
+		return;
+
 	if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
 		return; /* IRET will cause a vm exit */
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index bf7f4851dee204..5f2ee72c6e3125 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -583,6 +583,17 @@ static inline void clear_vnmi_mask(struct vcpu_svm *svm)
 	vmcb->control.int_ctl &= ~V_NMI_MASK;
 }
 
+
+static inline bool is_vnmi_pending_set(struct vcpu_svm *svm)
+{
+	struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+	if (vmcb)
+		return !!(vmcb->control.int_ctl & V_NMI_PENDING);
+	else
+		return false;
+}
+
 /* svm.c */
 #define MSR_INVALID			0xffffffffU
 
-- 
2.34.3
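
For context (not part of the patch): below is a minimal, standalone sketch of
the blocking rule the hunks above implement. The bit position, the trimmed-down
control-area struct, and main() are illustrative stand-ins rather than the
kernel's definitions; only the V_NMI_PENDING name and the int_ctl check mirror
the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit value only; the real position comes from the APM and the
 * rest of the vNMI series, not from this sketch. */
#define V_NMI_PENDING	(1u << 11)

/* Trimmed-down model of the VMCB control area: only int_ctl matters here. */
struct vmcb_control_model {
	uint32_t int_ctl;
};

/* Mirrors the new svm.h helper: is a virtual NMI already pending? */
static bool is_vnmi_pending_set(const struct vmcb_control_model *c)
{
	return !!(c->int_ctl & V_NMI_PENDING);
}

/* Mirrors the new check in svm_nmi_blocked(): with vNMI enabled, a second NMI
 * cannot be injected while one is still pending in the VMCB; hardware will
 * deliver the pending one without a VM exit once the guest finishes handling
 * the current virtual NMI. */
static bool nmi_blocked(const struct vmcb_control_model *c)
{
	return is_vnmi_pending_set(c);
}

int main(void)
{
	struct vmcb_control_model c = { .int_ctl = 0 };

	printf("no vNMI pending -> blocked: %d\n", nmi_blocked(&c)); /* 0 */

	c.int_ctl |= V_NMI_PENDING;	/* hypervisor queued a virtual NMI */
	printf("vNMI pending    -> blocked: %d\n", nmi_blocked(&c)); /* 1 */

	c.int_ctl &= ~V_NMI_PENDING;	/* hardware delivered it to the guest */
	printf("after delivery  -> blocked: %d\n", nmi_blocked(&c)); /* 0 */
	return 0;
}

The same reasoning explains the svm_enable_nmi_window() hunk: with vNMI enabled
there is no need (for now) to open an NMI window, since delivery of the pending
virtual NMI does not depend on an intercepted IRET.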