From: Kishon Vijay Abraham I <kvijayab@xxxxxxx>

Secure AVIC only allows the vNMI flow for injecting NMI interrupts to the
guest. Also, Secure AVIC hardware manages NMI delivery to the guest and
can detect if the guest is in a state to accept an NMI. So, update the NMI
injection code flow for Secure AVIC to inject V_NMI, and allow NMI
injection only if there is no V_NMI pending.

In addition, Secure AVIC requires V_NMI_ENABLE in the VINTR_CTRL field of
the VMSA to be set, so set it there as well.

Signed-off-by: Kishon Vijay Abraham I <kvijayab@xxxxxxx>
Co-developed-by: Neeraj Upadhyay <Neeraj.Upadhyay@xxxxxxx>
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@xxxxxxx>
---
 arch/x86/kvm/svm/sev.c |  2 +-
 arch/x86/kvm/svm/svm.c | 56 ++++++++++++++++++++++++++----------------
 2 files changed, 36 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 07a8a0c09382..40314c4086c2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -855,7 +855,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	save->sev_features = sev->vmsa_features;
 
 	if (sev_savic_active(vcpu->kvm))
-		save->vintr_ctrl |= V_GIF_MASK;
+		save->vintr_ctrl |= V_GIF_MASK | V_NMI_ENABLE_MASK;
 
 	/*
 	 * Skip FPU and AVX setup with KVM_SEV_ES_INIT to avoid
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 58733b63bcd7..08d5dc55e175 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3625,27 +3625,6 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
 		new_asid(svm, sd);
 }
 
-static void svm_inject_nmi(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
-
-	if (svm->nmi_l1_to_l2)
-		return;
-
-	/*
-	 * No need to manually track NMI masking when vNMI is enabled, hardware
-	 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
-	 * case where software directly injects an NMI.
-	 */
-	if (!is_vnmi_enabled(svm)) {
-		svm->nmi_masked = true;
-		svm_set_iret_intercept(svm);
-	}
-	++vcpu->stat.nmi_injections;
-}
-
 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3679,6 +3658,33 @@ static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (sev_savic_active(vcpu->kvm)) {
+		svm_set_vnmi_pending(vcpu);
+		++vcpu->stat.nmi_injections;
+		return;
+	}
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+
+	if (svm->nmi_l1_to_l2)
+		return;
+
+	/*
+	 * No need to manually track NMI masking when vNMI is enabled, hardware
+	 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
+	 * case where software directly injects an NMI.
+	 */
+	if (!is_vnmi_enabled(svm)) {
+		svm->nmi_masked = true;
+		svm_set_iret_intercept(svm);
+	}
+	++vcpu->stat.nmi_injections;
+}
+
 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3826,6 +3832,14 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/* Secure AVIC only supports V_NMI-based NMI injection. */
+	if (sev_savic_active(vcpu->kvm)) {
+		if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
+			return 0;
+		return 1;
+	}
+
 	if (svm->nested.nested_run_pending)
 		return -EBUSY;
 
-- 
2.34.1