Re: [PATCH 13/15] Add NMI injection support to SVM.

Gleb Natapov wrote:
> Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/svm.c              |   49 +++++++++++++++++++++++++++++++++++++-
>  2 files changed, 48 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 8b6f6e9..057a612 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -766,6 +766,7 @@ enum {
>  #define HF_GIF_MASK		(1 << 0)
>  #define HF_HIF_MASK		(1 << 1)
>  #define HF_VINTR_MASK		(1 << 2)
> +#define HF_NMI_MASK		(1 << 3)
>  
>  /*
>   * Hardware virtualization extension instructions may fault if a
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index c605477..cd60fd7 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1834,6 +1834,13 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
>  	return 1;
>  }
>  
> +static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
> +{
> +	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
> +	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
> +	return 1;
> +}
> +
>  static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
>  {
>  	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
> @@ -2111,6 +2118,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
>  	[SVM_EXIT_VINTR]			= interrupt_window_interception,
>  	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
>  	[SVM_EXIT_CPUID]			= cpuid_interception,
> +	[SVM_EXIT_IRET]                         = iret_interception,
>  	[SVM_EXIT_INVD]                         = emulate_on_interception,
>  	[SVM_EXIT_HLT]				= halt_interception,
>  	[SVM_EXIT_INVLPG]			= invlpg_interception,
> @@ -2218,6 +2226,11 @@ static void pre_svm_run(struct vcpu_svm *svm)
>  		new_asid(svm, svm_data);
>  }
>  
> +static void svm_inject_nmi(struct vcpu_svm *svm)
> +{
> +	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
> +	svm->vcpu.arch.hflags |= HF_NMI_MASK;
> +}
>  
>  static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
>  {
> @@ -2269,6 +2282,14 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
>  		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
>  }
>  
> +static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	struct vmcb *vmcb = svm->vmcb;
> +	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> +		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
> +}
> +
>  static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -2284,16 +2305,37 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
>  	svm_inject_irq(to_svm(vcpu), 0x0);
>  }
>  
> +static void enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +
> +	if (svm->vcpu.arch.hflags & HF_NMI_MASK)
> +		svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
> +	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
> +		enable_irq_window(vcpu);
> +}
> +
>  static void svm_intr_inject(struct kvm_vcpu *vcpu)
>  {
>  	/* try to reinject previous events if any */
> +	if (vcpu->arch.nmi_injected) {
> +		svm_inject_nmi(to_svm(vcpu));
> +		return;
> +	}
> +
>  	if (vcpu->arch.interrupt.pending) {
>  		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
>  		return;
>  	}
>  
>  	/* try to inject new event if pending */
> -	if (kvm_cpu_has_interrupt(vcpu)) {
> +	if (vcpu->arch.nmi_pending) {
> +		if (svm_nmi_allowed(vcpu)) {
> +			vcpu->arch.nmi_pending = false;
> +			vcpu->arch.nmi_injected = true;
> +			svm_inject_nmi(to_svm(vcpu));
> +		}
> +	} else if (kvm_cpu_has_interrupt(vcpu)) {

Strictly speaking, this 'else' is incorrect: if we have an NMI pending
while the NMI window is closed _but_ the guest has opened the IRQ
window, there is no reason why we shouldn't inject an IRQ. Pending IRQs
should only be skipped for this run if we actually injected an NMI.

>  		if (svm_interrupt_allowed(vcpu)) {
>  			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
>  			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
> @@ -2312,7 +2354,10 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  
>  	svm_intr_inject(vcpu);
>  
> -	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> +	/* enable NMI/IRQ window open exits if needed */
> +	if (vcpu->arch.nmi_pending)
> +		enable_nmi_window(vcpu);
> +	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
>  		enable_irq_window(vcpu);
>  
>  out:

Jan

-- 
Siemens AG, Corporate Technology, CT SE 2
Corporate Competence Center Embedded Linux