When restricted injection is active, only #HV exceptions can be
injected into an SEV-SNP guest. Detect that the restricted injection
feature is active for the guest, then follow the #HV doorbell
communication protocol from the GHCB specification to inject machine
check exceptions (MCEs).

Co-developed-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Melody Wang <huibo.wang@xxxxxxx>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/svm/sev.c             | 16 ++++++++++++++--
 arch/x86/kvm/svm/svm.c             | 17 +++++++++++++++++
 arch/x86/kvm/svm/svm.h             |  2 ++
 arch/x86/kvm/vmx/main.c            |  1 +
 arch/x86/kvm/vmx/vmx.c             |  5 +++++
 arch/x86/kvm/vmx/x86_ops.h         |  1 +
 arch/x86/kvm/x86.c                 |  7 +++++++
 9 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 5aff7222e40f..07fb1e2f59cb 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -77,6 +77,7 @@ KVM_X86_OP(inject_exception)
 KVM_X86_OP(cancel_injection)
 KVM_X86_OP(interrupt_allowed)
 KVM_X86_OP(nmi_allowed)
+KVM_X86_OP_OPTIONAL(mce_allowed)
 KVM_X86_OP(get_nmi_mask)
 KVM_X86_OP(set_nmi_mask)
 KVM_X86_OP(enable_nmi_window)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e159e44a6a1b..288b826e384c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1717,6 +1717,7 @@ struct kvm_x86_ops {
 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
+	int (*mce_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	/* Whether or not a virtual NMI is pending in hardware. */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 00d1f620d14a..19fcb0ddcff0 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -5079,6 +5079,8 @@ static void __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
 
 	if (type == INJECT_NMI)
 		hvdb->events.nmi = 1;
+	else if (type == INJECT_MCE)
+		hvdb->events.mce = 1;
 	else
 		hvdb->events.vector = vcpu->arch.interrupt.nr;
 
@@ -5094,6 +5096,11 @@ bool sev_snp_queue_exception(struct kvm_vcpu *vcpu)
 	if (!sev_snp_is_rinj_active(vcpu))
 		return false;
 
+	if (vcpu->arch.exception.vector == MC_VECTOR) {
+		__sev_snp_inject(INJECT_MCE, vcpu);
+		return true;
+	}
+
 	/*
 	 * Restricted injection is enabled, only #HV is supported.
	 * If the vector is not HV_VECTOR, do not inject the exception,
@@ -5162,7 +5169,7 @@ void sev_snp_cancel_injection(struct kvm_vcpu *vcpu)
 
 	/*
 	 * KVM only injects a single event each time (prepare_hv_injection),
-	 * so when events.nmi is true, the vector will be zero
+	 * so when events.nmi is true, the mce and vector will be zero
 	 */
 	if (hvdb->events.vector)
 		svm->vmcb->control.event_inj |= hvdb->events.vector |
@@ -5171,6 +5178,9 @@
 	if (hvdb->events.nmi)
 		svm->vmcb->control.event_inj |= SVM_EVTINJ_TYPE_NMI;
 
+	if (hvdb->events.mce)
+		svm->vmcb->control.event_inj |= MC_VECTOR | SVM_EVTINJ_TYPE_EXEPT;
+
 	hvdb->events.pending_events = 0;
 
 out:
@@ -5196,9 +5206,11 @@ bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu)
 	if (!hvdb)
 		return true;
 
-	/* Indicate NMIs and interrupts blocked based on guest acknowledgment */
+	/* Indicate NMIs, MCEs and interrupts blocked based on guest acknowledgment */
 	if (type == INJECT_NMI)
 		blocked = hvdb->events.nmi;
+	else if (type == INJECT_MCE)
+		blocked = hvdb->events.mce;
 	else
 		blocked = !!hvdb->events.vector;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 91bf17684bc8..696653269c55 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3881,6 +3881,22 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return 1;
 }
 
+bool svm_mce_blocked(struct kvm_vcpu *vcpu)
+{
+	if (sev_snp_is_rinj_active(vcpu))
+		return sev_snp_blocked(INJECT_MCE, vcpu);
+
+	return false;
+}
+
+static int svm_mce_allowed(struct kvm_vcpu *vcpu)
+{
+	if (svm_mce_blocked(vcpu))
+		return 0;
+
+	return 1;
+}
+
 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5091,6 +5107,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.cancel_injection = svm_cancel_injection,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
+	.mce_allowed = svm_mce_allowed,
 	.get_nmi_mask = svm_get_nmi_mask,
 	.set_nmi_mask = svm_set_nmi_mask,
 	.enable_nmi_window = svm_enable_nmi_window,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index b6e833f455ae..9c71bf01729b 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -58,6 +58,7 @@ extern int lbrv;
 enum inject_type {
 	INJECT_IRQ,
 	INJECT_NMI,
+	INJECT_MCE,
 };
 
 /*
@@ -616,6 +617,7 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
 bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
+bool svm_mce_blocked(struct kvm_vcpu *vcpu);
 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
 void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 92d35cc6cd15..036f750c53c5 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -87,6 +87,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.cancel_injection = vmx_cancel_injection,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
+	.mce_allowed = vmx_mce_allowed,
 	.get_nmi_mask = vmx_get_nmi_mask,
 	.set_nmi_mask = vmx_set_nmi_mask,
 	.enable_nmi_window = vmx_enable_nmi_window,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 893366e53732..afa6d126324c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5103,6 +5103,11 @@ int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return !vmx_interrupt_blocked(vcpu);
 }
 
+int vmx_mce_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	void __user *ret;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index a55981c5216e..8607ef20897d 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -93,6 +93,7 @@ void vmx_inject_exception(struct kvm_vcpu *vcpu);
 void vmx_cancel_injection(struct kvm_vcpu *vcpu);
 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
+int vmx_mce_allowed(struct kvm_vcpu *vcpu);
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2e713480933a..a76ce35c5b93 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10357,12 +10357,19 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 			}
 		}
 
+		if (vcpu->arch.exception.vector == MC_VECTOR) {
+			r = static_call(kvm_x86_mce_allowed)(vcpu);
+			if (!r)
+				goto out_except;
+		}
+
 		kvm_inject_exception(vcpu);
 
 		vcpu->arch.exception.pending = false;
 		vcpu->arch.exception.injected = true;
 
 		can_inject = false;
+out_except:
 	}
 
 	/* Don't inject interrupts if the user asked to avoid doing so */
-- 
2.34.1
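
For reference, the hvdb->events accesses above assume a #HV doorbell
page "pending events" layout along the lines of the sketch below. This
is a minimal illustration only: the field names are taken from the
usage in this diff, the union/type names here are hypothetical, and the
authoritative layout is the one defined by the GHCB specification and
by the earlier patches in this series that introduce the doorbell page.

/*
 * Sketch of the #HV doorbell page event word assumed by this patch
 * (illustrative, not the series' actual definition). The low byte
 * carries a pending interrupt vector; NMI and MCE are single-bit
 * latches above it, which is why writing pending_events = 0 clears
 * everything at once in sev_snp_cancel_injection().
 */
union hvdb_events {
	struct {
		u8 vector;		/* pending interrupt vector, 0 if none */
		u8 nmi : 1;		/* pending NMI */
		u8 mce : 1;		/* pending MCE, set via INJECT_MCE */
		u8 reserved : 6;	/* remaining bits, per the GHCB spec */
	};
	u16 pending_events;
};

Because each event class is a single latch rather than a queue,
sev_snp_blocked() reports a class as blocked while its bit is still
set: a new MCE cannot be queued until the guest has acknowledged the
previous one, and since KVM injects only one event at a time, at most
one of events.nmi, events.mce and events.vector is non-zero when
sev_snp_cancel_injection() re-encodes the event into event_inj.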