Re: [PATCH v2] KVM: Fix simultaneous NMIs

On Tue, Sep 20, 2011 at 01:43:14PM +0300, Avi Kivity wrote:
> If simultaneous NMIs happen, we're supposed to queue the second
> and next (collapsing them), but currently we sometimes collapse
> the second into the first.
> 
> Fix by using a counter for pending NMIs instead of a bool; since
> the counter limit depends on whether the processor is currently
> in an NMI handler, which can only be checked in vcpu context
> (via the NMI mask), we add a new KVM_REQ_NMI to request recalculation
> of the counter.
> 
> Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |    5 ++-
>  arch/x86/kvm/x86.c              |   48 +++++++++++++++++++++++++-------------
>  include/linux/kvm_host.h        |    1 +
>  3 files changed, 35 insertions(+), 19 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 6ab4241..ab62711 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -413,8 +413,9 @@ struct kvm_vcpu_arch {
>  	u32  tsc_catchup_mult;
>  	s8   tsc_catchup_shift;
>  
> -	bool nmi_pending;
> -	bool nmi_injected;
> +	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
> +	unsigned nmi_pending; /* NMI queued after currently running handler */
> +	bool nmi_injected;    /* Trying to inject an NMI this entry */
>  
>  	struct mtrr_state_type mtrr_state;
>  	u32 pat;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6b37f18..d51e407 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -83,6 +83,7 @@
>  static void update_cr8_intercept(struct kvm_vcpu *vcpu);
>  static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
>  				    struct kvm_cpuid_entry2 __user *entries);
> +static void process_nmi(struct kvm_vcpu *vcpu);
>  
>  struct kvm_x86_ops *kvm_x86_ops;
>  EXPORT_SYMBOL_GPL(kvm_x86_ops);
> @@ -359,8 +360,8 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
>  
>  void kvm_inject_nmi(struct kvm_vcpu *vcpu)
>  {
> -	kvm_make_request(KVM_REQ_EVENT, vcpu);
> -	vcpu->arch.nmi_pending = 1;
> +	atomic_inc(&vcpu->arch.nmi_queued);
> +	kvm_make_request(KVM_REQ_NMI, vcpu);
>  }
>  EXPORT_SYMBOL_GPL(kvm_inject_nmi);
>  
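For context, the new process_nmi() is snipped from the quote above; it
collapses the queue roughly like this (paraphrased from the changelog,
so check the patch itself for the exact code):

static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned limit = 2;

	/*
	 * If an NMI is already being handled (NMI mask set) or injected,
	 * the CPU can only pend one more behind it; otherwise one can
	 * run and one can be pending.
	 */
	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;

	/* Drain the async queue into the vcpu-context counter, capped. */
	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

This keeps kvm_inject_nmi() lock-free (any context may bump nmi_queued)
while the limit check runs only in vcpu context, where the NMI mask can
be read safely.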
> @@ -2827,6 +2828,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
>  static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
>  					       struct kvm_vcpu_events *events)
>  {
> +	process_nmi(vcpu);
>  	events->exception.injected =
>  		vcpu->arch.exception.pending &&
>  		!kvm_exception_is_soft(vcpu->arch.exception.nr);
> @@ -2844,7 +2846,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
>  			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
>  
>  	events->nmi.injected = vcpu->arch.nmi_injected;
> -	events->nmi.pending = vcpu->arch.nmi_pending;
> +	events->nmi.pending = vcpu->arch.nmi_pending != 0;
>  	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
>  	events->nmi.pad = 0;

nmi_queued should also be saved and restored. Not sure if it's
necessary, though.

We should at least reset nmi_queued somewhere (set_vcpu_events?), so
stale queued NMIs can't leak into the newly loaded state.
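A sketch of what I mean, reusing the KVM_VCPUEVENT_VALID_NMI_PENDING
flag that set_vcpu_events already handles (hypothetical placement, not
from the patch):

	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
		vcpu->arch.nmi_pending = events->nmi.pending;
		/* Userspace state replaces whatever is still queued. */
		atomic_set(&vcpu->arch.nmi_queued, 0);
	}

That way KVM_SET_VCPU_EVENTS fully determines the NMI state instead of
racing with NMIs queued before the ioctl.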

> @@ -2864,6 +2866,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
>  			      | KVM_VCPUEVENT_VALID_SHADOW))
>  		return -EINVAL;
>  
> +	process_nmi(vcpu);

This should be after the nmi fields are set, not before? As it stands,
whatever process_nmi() folds into nmi_pending is overwritten a few
lines down by events->nmi.pending (when KVM_VCPUEVENT_VALID_NMI_PENDING
is set).
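I.e., roughly this ordering (hand-written illustration, not from the
patch):

	vcpu->arch.nmi_injected = events->nmi.injected;
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

	/* Drain nmi_queued only after userspace's values are in place. */
	process_nmi(vcpu);

Unless discarding queued NMIs on set is the intended semantics, in
which case a comment there would help.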
