Similar to NMI, there may be ISA specific reasons why an SMI cannot be
injected into the guest. This commit adds a new smi_allowed callback to
be implemented in following commits.

Signed-off-by: Ladi Prosek <lprosek@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm.c              | 6 ++++++
 arch/x86/kvm/vmx.c              | 6 ++++++
 arch/x86/kvm/x86.c              | 2 +-
 4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 769de6d2e684..1c9d6b90f50c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1062,6 +1062,7 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*prep_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*post_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase,
 			      bool *reload_state);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d9b3e1bea644..e7c6c7fb3e19 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5393,6 +5393,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int svm_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -5517,6 +5522,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.update_pi_irte = svm_update_pi_irte,
 
 	.setup_mce = svm_setup_mce,
+	.smi_allowed = svm_smi_allowed,
 	.prep_enter_smm = svm_prep_enter_smm,
 	.post_leave_smm = svm_post_leave_smm,
 };
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 15478f413392..bde45f0c27cc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11941,6 +11941,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int vmx_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -12080,6 +12085,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.smi_allowed = vmx_smi_allowed,
 	.prep_enter_smm = vmx_prep_enter_smm,
 	.post_leave_smm = vmx_post_leave_smm,
 };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e9aef1d858a8..588ef3864ebd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6433,7 +6433,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+	} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
-- 
2.13.5
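
For context on how the hook is meant to be used: inject_pending_event() now
consults ->smi_allowed before committing to enter_smm(), exactly as it already
does with ->nmi_allowed for NMIs, so a vendor module can keep an SMI pending
instead of dropping it. As a purely illustrative sketch (not the implementation
added by the follow-up commits), an ISA-specific VMX version that postpones
SMIs while the vCPU is executing a nested (L2) guest could look like this,
assuming only the existing is_guest_mode() helper from kvm_host.h:

static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
	/*
	 * Hypothetical example: delivering an SMI to an L2 guest would
	 * first require a nested VM exit, so report SMIs as blocked
	 * until the vCPU has returned to L1.
	 */
	return !is_guest_mode(vcpu);
}

Because the caller merely skips the enter_smm() branch when the callback
returns 0, vcpu->arch.smi_pending stays set and the SMI is retried on a later
invocation of inject_pending_event().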