When restricted injection is active, only #HV exceptions can be injected
into an SEV-SNP guest. Detect whether the restricted injection feature is
active for the guest, and then follow the #HV doorbell communication
protocol from the GHCB specification to inject MCEs.

Co-developed-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Melody Wang <huibo.wang@xxxxxxx>
---
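For review convenience, here is a sketch of how the #HV doorbell event
word manipulated by this patch might be laid out. The field names and bit
positions are inferred from the hvdb->events accesses below and from the
GHCB specification's #HV doorbell page description; they are illustrative
assumptions, not the spec's authoritative definition:

    #include <linux/types.h>

    /* Assumed layout of the first word of the #HV doorbell page. */
    union hv_doorbell_events {
            __u16 pending_events;
            struct {
                    __u16 vector            : 8; /* pending interrupt vector */
                    __u16 nmi               : 1; /* pending NMI */
                    __u16 mce               : 1; /* pending MCE (this patch) */
                    __u16 reserved          : 5;
                    __u16 no_further_signal : 1; /* no additional #HV needed */
            };
    };

With one slot per event type, the blocking checks in sev_snp_blocked()
reduce to testing whether the slot is still occupied: a set bit (or a
nonzero vector) means the guest has not yet consumed that event.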
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/svm/sev.c             | 16 ++++++++++++++--
 arch/x86/kvm/svm/svm.c             | 17 +++++++++++++++++
 arch/x86/kvm/svm/svm.h             |  2 ++
 arch/x86/kvm/vmx/main.c            |  1 +
 arch/x86/kvm/vmx/vmx.c             |  5 +++++
 arch/x86/kvm/vmx/x86_ops.h         |  1 +
 arch/x86/kvm/x86.c                 |  7 +++++++
 9 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 68ad4f923664..9e5764a8e031 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -76,6 +76,7 @@ KVM_X86_OP(inject_exception)
 KVM_X86_OP(cancel_injection)
 KVM_X86_OP(interrupt_allowed)
 KVM_X86_OP(nmi_allowed)
+KVM_X86_OP_OPTIONAL(mce_allowed)
 KVM_X86_OP(get_nmi_mask)
 KVM_X86_OP(set_nmi_mask)
 KVM_X86_OP(enable_nmi_window)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 94e7b5a4fafe..cb1608a69144 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1715,6 +1715,7 @@ struct kvm_x86_ops {
 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
+	int (*mce_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	/* Whether or not a virtual NMI is pending in hardware. */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7f9f35e0e092..87c493bad93a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -5071,6 +5071,8 @@ static bool __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
 
 	if (type == INJECT_NMI)
 		hvdb->events.nmi = 1;
+	else if (type == INJECT_MCE)
+		hvdb->events.mce = 1;
 	else
 		hvdb->events.vector = vcpu->arch.interrupt.nr;
 
@@ -5088,6 +5090,11 @@ bool sev_snp_queue_exception(struct kvm_vcpu *vcpu)
 	if (!sev_snp_is_rinj_active(vcpu))
 		return false;
 
+	if (vcpu->arch.exception.vector == MC_VECTOR) {
+		if (__sev_snp_inject(INJECT_MCE, vcpu))
+			return true;
+	}
+
 	/*
 	 * Restricted injection is enabled, only #HV is supported.
 	 * If the vector is not HV_VECTOR, do not inject the exception,
@@ -5152,7 +5159,7 @@ void sev_snp_cancel_injection(struct kvm_vcpu *vcpu)
 
 	/*
 	 * KVM only injects a single event each time (prepare_hv_injection),
-	 * so when events.nmi is true, the vector will be zero
+	 * so when events.nmi is true, the mce and vector will be zero
 	 */
 	if (hvdb->events.vector)
 		svm->vmcb->control.event_inj |= hvdb->events.vector |
@@ -5161,6 +5168,9 @@
 	if (hvdb->events.nmi)
 		svm->vmcb->control.event_inj |= SVM_EVTINJ_TYPE_NMI;
 
+	if (hvdb->events.mce)
+		svm->vmcb->control.event_inj |= MC_VECTOR | SVM_EVTINJ_TYPE_EXEPT;
+
 	hvdb->events.pending_events = 0;
 
 out:
@@ -5178,9 +5188,11 @@ bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu)
 	if (!hvdb)
 		return true;
 
-	/* Indicate NMIs and interrupts blocked based on guest acknowledgment */
+	/* Indicate NMIs, MCEs and interrupts blocked based on guest acknowledgment */
 	if (type == INJECT_NMI)
 		blocked = hvdb->events.nmi;
+	else if (type == INJECT_MCE)
+		blocked = hvdb->events.mce;
 	else
 		blocked = !!hvdb->events.vector;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d9c572344f0c..1c13c5da6eea 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3867,6 +3867,22 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return 1;
 }
 
+bool svm_mce_blocked(struct kvm_vcpu *vcpu)
+{
+	if (sev_snp_is_rinj_active(vcpu))
+		return sev_snp_blocked(INJECT_MCE, vcpu);
+
+	return false;
+}
+
+static int svm_mce_allowed(struct kvm_vcpu *vcpu)
+{
+	if (svm_mce_blocked(vcpu))
+		return 0;
+
+	return 1;
+}
+
 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5066,6 +5082,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.cancel_injection = svm_cancel_injection,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
+	.mce_allowed = svm_mce_allowed,
 	.get_nmi_mask = svm_get_nmi_mask,
 	.set_nmi_mask = svm_set_nmi_mask,
 	.enable_nmi_window = svm_enable_nmi_window,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f60ff6229ff4..0cf32954589f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -44,6 +44,7 @@ extern int lbrv;
 enum inject_type {
 	INJECT_IRQ,
 	INJECT_NMI,
+	INJECT_MCE,
 };
 
 /*
@@ -602,6 +603,7 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
 bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
+bool svm_mce_blocked(struct kvm_vcpu *vcpu);
 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
 void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 0bf35ebe8a1b..c3a49a3b7f21 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -84,6 +84,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.cancel_injection = vmx_cancel_injection,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
+	.mce_allowed = vmx_mce_allowed,
 	.get_nmi_mask = vmx_get_nmi_mask,
 	.set_nmi_mask = vmx_set_nmi_mask,
 	.enable_nmi_window = vmx_enable_nmi_window,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f18c2d8c7476..b3dce5d95329 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5093,6 +5093,11 @@ int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return !vmx_interrupt_blocked(vcpu);
 }
 
+int vmx_mce_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	void __user *ret;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index ce3221cd1d01..b2b1a3bb4eb3 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -92,6 +92,7 @@ void vmx_inject_exception(struct kvm_vcpu *vcpu);
 void vmx_cancel_injection(struct kvm_vcpu *vcpu);
 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
+int vmx_mce_allowed(struct kvm_vcpu *vcpu);
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef3d3511e4af..e926fc9d82e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10487,12 +10487,19 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 			}
 		}
 
+		if (vcpu->arch.exception.vector == MC_VECTOR) {
+			r = static_call(kvm_x86_mce_allowed)(vcpu);
+			if (!r)
+				goto out_except;
+		}
+
 		kvm_inject_exception(vcpu);
 
 		vcpu->arch.exception.pending = false;
 		vcpu->arch.exception.injected = true;
 
 		can_inject = false;
+out_except:
 	}
 
 	/* Don't inject interrupts if the user asked to avoid doing so */
-- 
2.34.1
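As an illustrative aside (hypothetical user-space code, not part of the
patch), the block/inject/acknowledge cycle that __sev_snp_inject() and
sev_snp_blocked() implement can be modelled stand-alone:

    #include <stdbool.h>
    #include <stdio.h>

    enum inject_type { INJECT_IRQ, INJECT_NMI, INJECT_MCE };

    /* Assumed shape of the doorbell event word; see the sketch above. */
    struct hvdb_events {
            unsigned int vector : 8;
            unsigned int nmi    : 1;
            unsigned int mce    : 1;
    };

    /* Mirrors sev_snp_blocked(): an event type stays blocked while a
     * previous event of that type is still pending in the doorbell page. */
    static bool blocked(const struct hvdb_events *e, enum inject_type t)
    {
            if (t == INJECT_NMI)
                    return e->nmi;
            if (t == INJECT_MCE)
                    return e->mce;
            return e->vector != 0;
    }

    /* Mirrors the injection side: record the event unless it is blocked. */
    static bool inject(struct hvdb_events *e, enum inject_type t,
                       unsigned int vector)
    {
            if (blocked(e, t))
                    return false;
            if (t == INJECT_NMI)
                    e->nmi = 1;
            else if (t == INJECT_MCE)
                    e->mce = 1;
            else
                    e->vector = vector;
            return true;
    }

    int main(void)
    {
            struct hvdb_events e = { 0 };

            printf("first MCE:  %d\n", inject(&e, INJECT_MCE, 0)); /* 1: injected */
            printf("second MCE: %d\n", inject(&e, INJECT_MCE, 0)); /* 0: blocked  */
            e.mce = 0; /* the guest's #HV handler consumes the event */
            printf("third MCE:  %d\n", inject(&e, INJECT_MCE, 0)); /* 1: injected */
            return 0;
    }

This is the behaviour svm_mce_allowed() exposes to
kvm_check_and_inject_events(): a second MCE stays pending until the guest
acknowledges the first one through its #HV handler.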