On Wed, Jul 21, 2010 at 08:37:26AM +0300, Avi Kivity wrote:
> On 07/21/2010 03:55 AM, Marcelo Tosatti wrote:
> >
> >>--- a/arch/x86/kvm/x86.c
> >>+++ b/arch/x86/kvm/x86.c
> >>@@ -4709,6 +4709,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >> 	if (unlikely(r))
> >> 		goto out;
> >>
> >>+	inject_pending_event(vcpu);
> >>+
> >>+	/* enable NMI/IRQ window open exits if needed */
> >>+	if (vcpu->arch.nmi_pending)
> >>+		kvm_x86_ops->enable_nmi_window(vcpu);
> >>+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> >>+		kvm_x86_ops->enable_irq_window(vcpu);
> >>+
> >>+	if (kvm_lapic_enabled(vcpu)) {
> >>+		update_cr8_intercept(vcpu);
> >>+		kvm_lapic_sync_to_vapic(vcpu);
> >>+	}
> >>+
> >> 	preempt_disable();
> >>
> >> 	kvm_x86_ops->prepare_guest_switch(vcpu);
> >>@@ -4727,23 +4740,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >> 		smp_wmb();
> >> 		local_irq_enable();
> >> 		preempt_enable();
> >>+		kvm_x86_ops->cancel_injection(vcpu);
> >> 		r = 1;
> >> 		goto out;
> >> 	}
> >>
> >>-	inject_pending_event(vcpu);
> >>-
> >>-	/* enable NMI/IRQ window open exits if needed */
> >>-	if (vcpu->arch.nmi_pending)
> >>-		kvm_x86_ops->enable_nmi_window(vcpu);
> >>-	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> >>-		kvm_x86_ops->enable_irq_window(vcpu);
> >>-
> >>-	if (kvm_lapic_enabled(vcpu)) {
> >>-		update_cr8_intercept(vcpu);
> >>-		kvm_lapic_sync_to_vapic(vcpu);
> >>-	}
> >>-
> >> 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
> >>
> >> 	kvm_guest_enter();
> >
> >This breaks
> >
> >int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
> >{
> >	struct kvm_lapic *apic = vcpu->arch.apic;
> >	int highest_irr;
> >
> >	/* This may race with setting of irr in __apic_accept_irq() and
> >	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
> >	 * will cause vmexit immediately and the value will be recalculated
> >	 * on the next vmentry.
> >	 */
> >
> >(also valid for nmi_pending and PIC). Can't simply move
> >atomic_set(guest_mode, 1) in preemptible section as that would make it
> >possible for kvm_vcpu_kick to IPI stale vcpu->cpu.
>
> Right.  Can fix by adding a kvm_make_request() to force the retry loop.
>
> >Also should undo vmx.rmode.* ?
>
> Elaborate?

Undo vmx.rmode assignments on cancel_injection.
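
To make that concrete, roughly something like the below (a sketch only, not
the actual patch: KVM_REQ_EVENT is just an illustrative name for a request
bit, and the vmx_cancel_injection() body only hints at the kind of state
that would need undoing):

	/* in the vcpu_enter_guest() cancel path, after preempt_enable() */
	kvm_x86_ops->cancel_injection(vcpu);
	/*
	 * Force the next vcpu_enter_guest() iteration to re-run
	 * inject_pending_event() instead of trusting the racy
	 * kvm_lapic_find_highest_irr()/nmi_pending/PIC checks.
	 */
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	r = 1;
	goto out;

	static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		/* drop whatever vmx_inject_irq()/vmx_inject_nmi() queued */
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);

		/* and undo the real-mode injection bookkeeping (vmx.rmode.*) */
		vmx->rmode.irq.pending = false;
	}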