Migrate nested guest NMI intercept processing to new check_nested_events.

Signed-off-by: Cathy Avery <cavery@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++++
 arch/x86/kvm/svm/svm.c    |  2 +-
 arch/x86/kvm/svm/svm.h    | 15 ---------------
 3 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 90a1ca939627..1ba8ef46b0b5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -764,6 +764,20 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
         return vmexit;
 }
 
+static bool nested_exit_on_nmi(struct vcpu_svm *svm)
+{
+        return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+}
+
+static void nested_svm_nmi(struct vcpu_svm *svm)
+{
+        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+        svm->vmcb->control.exit_info_1 = 0;
+        svm->vmcb->control.exit_info_2 = 0;
+
+        svm->nested.exit_required = true;
+}
+
 static void nested_svm_intr(struct vcpu_svm *svm)
 {
         svm->vmcb->control.exit_code = SVM_EXIT_INTR;
@@ -793,6 +807,13 @@ int svm_check_nested_events(struct kvm_vcpu *vcpu)
                 return 0;
         }
 
+        if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
+                if (block_nested_events)
+                        return -EBUSY;
+                nested_svm_nmi(svm);
+                return 0;
+        }
+
         return 0;
 }
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2be5bbae3a40..84c338c55348 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3053,7 +3053,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
         int ret;
         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
-        ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+        ret = ret && gif_set(svm);
 
         return ret;
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index df3474f4fb02..9be2b890ff3c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -369,21 +369,6 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 #define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
 
-/* This function returns true if it is save to enable the nmi window */
-static inline bool nested_svm_nmi(struct vcpu_svm *svm)
-{
-        if (!is_guest_mode(&svm->vcpu))
-                return true;
-
-        if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
-                return true;
-
-        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
-        svm->nested.exit_required = true;
-
-        return false;
-}
-
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
         return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
-- 
2.20.1
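
For context, here is a minimal, self-contained userspace sketch (not kernel code) of the decision this patch moves from svm_nmi_allowed()/nested_svm_nmi() into svm_check_nested_events(): while L2 is running, a pending NMI is turned into a synthesized SVM_EXIT_NMI exit to L1 only if L1 intercepts NMIs, and is deferred with -EBUSY while another nested exit is still outstanding. The struct layouts, the INTERCEPT_NMI bit position and the literal -16 for -EBUSY below are simplified stand-ins, not the kernel's definitions.

/*
 * Toy model of the NMI handling now done in svm_check_nested_events().
 * Names mirror the patch; the surrounding types are simplified.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTERCEPT_NMI   0       /* bit position; illustrative value only */
#define SVM_EXIT_NMI    0x61    /* exit code reported for an NMI intercept */

struct toy_nested_state {
        uint64_t intercept;      /* L1's intercept bitmap (low word) */
        bool exit_required;      /* synthetic vmexit to L1 is queued */
};

struct toy_vcpu {
        bool guest_mode;         /* currently running L2? */
        bool nmi_pending;
        bool block_nested_events; /* e.g. a prior exit or reinjection pending */
        struct toy_nested_state nested;
        uint32_t exit_code;
};

static bool nested_exit_on_nmi(const struct toy_vcpu *v)
{
        return v->nested.intercept & (1ULL << INTERCEPT_NMI);
}

/* Shape of svm_check_nested_events(): -EBUSY means "retry later". */
static int check_nested_events(struct toy_vcpu *v)
{
        if (v->guest_mode && v->nmi_pending && nested_exit_on_nmi(v)) {
                if (v->block_nested_events)
                        return -16; /* stands in for -EBUSY */
                v->exit_code = SVM_EXIT_NMI;
                v->nested.exit_required = true;
                return 0;
        }
        return 0;
}

int main(void)
{
        struct toy_vcpu v = {
                .guest_mode = true,
                .nmi_pending = true,
                .nested.intercept = 1ULL << INTERCEPT_NMI,
        };

        check_nested_events(&v);
        printf("exit_required=%d exit_code=%#x\n",
               v.nested.exit_required, v.exit_code);
        return 0;
}

Running the sketch prints exit_required=1 exit_code=0x61, i.e. the pending NMI is reflected to L1 as a nested exit instead of being injected into L2.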