From: Jan Kiszka <jan.kiszka@xxxxxxxxxxx> On Intel, raising INIT causes an unconditional vmexit. On AMD, this is controlled by the interception mask. Signed-off-by: Jan Kiszka <jan.kiszka@xxxxxxxxxxx> --- Sorry for double-posting, failed to CC the list on first try. arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/asm/vmx.h | 2 ++ arch/x86/kvm/svm.c | 16 ++++++++++++++++ arch/x86/kvm/vmx.c | 15 +++++++++++++++ arch/x86/kvm/x86.c | 6 ++++++ 5 files changed, 40 insertions(+), 0 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 635a74d..5b0534a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -681,6 +681,7 @@ struct kvm_x86_ops { void (*run)(struct kvm_vcpu *vcpu); int (*handle_exit)(struct kvm_vcpu *vcpu); + bool (*handle_init_received)(struct kvm_vcpu *vcpu); void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 5c9dbad..40f7f74 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -30,6 +30,7 @@ #define EXIT_REASON_EXCEPTION_NMI 0 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 #define EXIT_REASON_TRIPLE_FAULT 2 +#define EXIT_REASON_INIT_SIGNAL 3 #define EXIT_REASON_PENDING_INTERRUPT 7 #define EXIT_REASON_NMI_WINDOW 8 @@ -74,6 +75,7 @@ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ + { EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \ { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e1b1ce2..1118631 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2390,6 +2390,21 @@ static int 
nested_svm_vmexit(struct vcpu_svm *svm) return 0; } +static bool nested_svm_handle_init_received(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (!is_guest_mode(vcpu) || + !(svm->nested.intercept & (1ULL << INTERCEPT_INIT))) + return false; + + svm->vmcb->control.exit_code = SVM_EXIT_INIT; + svm->vmcb->control.exit_int_info = 0; + nested_svm_vmexit(svm); + + return true; +} + static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { /* @@ -4295,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = { .run = svm_vcpu_run, .handle_exit = handle_exit, + .handle_init_received = nested_svm_handle_init_received, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = svm_set_interrupt_shadow, .get_interrupt_shadow = svm_get_interrupt_shadow, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ccb6456..6830d26 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7538,6 +7538,20 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) nested_vmx_succeed(vcpu); } +static bool nested_vmx_handle_init_received(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + if (!is_guest_mode(vcpu)) + return false; + + nested_vmx_vmexit(vcpu); + vmcs12->vm_exit_reason = EXIT_REASON_INIT_SIGNAL; + vmcs12->vm_exit_intr_info = 0; + + return true; +} + /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also @@ -7610,6 +7624,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, + .handle_init_received = nested_vmx_handle_init_received, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3c5bb6f..28dba95 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6977,6 +6977,12 @@ void 
kvm_arch_flush_shadow_memslot(struct kvm *kvm, int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { + if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED && + kvm_x86_ops->handle_init_received(vcpu)) { + /* nested vmexit, L1 is runnable now */ + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + return 1; + } return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) -- 1.7.3.4 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html