On Wed, Apr 10, 2013 at 09:22:20PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@xxxxxxxxx>
>
> The current interrupt coalescing logic, which is only used by the RTC,
> conflicts with Posted Interrupts.
> This patch introduces a new mechanism that uses EOI to track interrupt
> delivery: when delivering an interrupt to a vcpu, pending_eoi is set to
> the number of vcpus that received the interrupt, and it is decremented
> as each vcpu writes EOI. No subsequent RTC interrupt can be delivered
> to a vcpu until all vcpus have written EOI.
>
> Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
> ---
>  virt/kvm/ioapic.c |   39 ++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 38 insertions(+), 1 deletions(-)
>
> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
> index a49fcd5..aeac154 100644
> --- a/virt/kvm/ioapic.c
> +++ b/virt/kvm/ioapic.c
> @@ -147,6 +147,26 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
>  		__rtc_irq_eoi_tracking_restore_one(vcpu);
>  }
>
> +static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
> +{
> +	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
> +		--ioapic->rtc_status.pending_eoi;
> +
> +	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
> +}
> +
> +static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic, int irq,
> +		bool line_status)
> +{
> +	if (irq != RTC_GSI || !line_status)
> +		return false;

Please move the check from rtc_irq_check_coalesced() to
kvm_ioapic_set_irq() like this:

  if (irq == RTC_GSI && line_status &&
      rtc_irq_check_coalesced(ioapic, irq, line_status))
	....

I was going to fix it myself while applying, but since there will be a
new posted interrupt series anyway, you may as well fix this one too.
(A compilable sketch of the resulting call site is appended after the
mail.)

> +
> +	if (ioapic->rtc_status.pending_eoi > 0)
> +		return true; /* coalesced */
> +
> +	return false;
> +}
> +
>  static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
>  		bool line_status)
>  {
> @@ -260,6 +280,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
>  {
>  	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
>  	struct kvm_lapic_irq irqe;
> +	int ret;
>
>  	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
>  		     "vector=%x trig_mode=%x\n",
> @@ -275,7 +296,15 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
>  	irqe.level = 1;
>  	irqe.shorthand = 0;
>
> -	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> +	if (irq == RTC_GSI && line_status) {
> +		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
> +		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
> +				ioapic->rtc_status.dest_map);
> +		ioapic->rtc_status.pending_eoi = ret;
> +	} else
> +		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
> +
> +	return ret;
>  }
>
>  int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
> @@ -299,6 +328,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
>  		ret = 1;
>  	} else {
>  		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
> +
> +		if (rtc_irq_check_coalesced(ioapic, irq, line_status)) {
> +			ret = 0; /* coalesced */
> +			goto out;
> +		}
>  		ioapic->irr |= mask;
>  		if ((edge && old_irr != ioapic->irr) ||
>  		    (!edge && !entry.fields.remote_irr))
> @@ -306,6 +340,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
>  		else
>  			ret = 0; /* report coalesced interrupt */
>  	}
> +out:
>  	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
>  	spin_unlock(&ioapic->lock);
>
> @@ -333,6 +368,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
>  		if (ent->fields.vector != vector)
>  			continue;
>
> +		if (i == RTC_GSI)
> +			rtc_irq_eoi(ioapic, vcpu);
>  		/*
>  		 * We are dropping lock while calling ack notifiers because ack
>  		 * notifier callbacks for assigned devices call into IOAPIC
> --
> 1.7.1

--
			Gleb.
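
For reference, a minimal stand-alone model of the EOI tracking described
in the changelog: on delivery, pending_eoi is set to the number of
destination vcpus and each destination is recorded in dest_map; every EOI
clears that vcpu's entry and decrements the counter; a new RTC interrupt
counts as coalesced while the counter is non-zero. This is a user-space
sketch only: MAX_VCPUS, the boolean dest_map array and the main() harness
are invented for illustration and do not correspond to the kernel data
structures in the patch.

/*
 * User-space model of the pending_eoi / dest_map bookkeeping.
 * Not kernel code; it only mirrors the scheme in the changelog.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCPUS 4

struct rtc_status_model {
	int pending_eoi;
	bool dest_map[MAX_VCPUS];	/* stands in for the kernel bitmap */
};

static struct rtc_status_model rtc;

/* Deliver the RTC interrupt to every vcpu in @dest; return the count. */
static int rtc_deliver(const bool *dest)
{
	int delivered = 0;

	assert(rtc.pending_eoi == 0);	/* mirrors the BUG_ON in the patch */
	for (int i = 0; i < MAX_VCPUS; i++) {
		if (dest[i]) {
			rtc.dest_map[i] = true;
			delivered++;
		}
	}
	rtc.pending_eoi = delivered;
	return delivered;
}

/* A vcpu writes EOI: drop it from dest_map and decrement pending_eoi. */
static void rtc_eoi(int vcpu_id)
{
	if (rtc.dest_map[vcpu_id]) {
		rtc.dest_map[vcpu_id] = false;
		--rtc.pending_eoi;
	}
	assert(rtc.pending_eoi >= 0);
}

/* A new RTC interrupt is coalesced while any EOI is still outstanding. */
static bool rtc_coalesced(void)
{
	return rtc.pending_eoi > 0;
}

int main(void)
{
	bool dest[MAX_VCPUS] = { true, true, false, false };

	rtc_deliver(dest);
	printf("coalesced before any EOI: %d\n", rtc_coalesced());	/* 1 */
	rtc_eoi(0);
	printf("coalesced after one EOI:  %d\n", rtc_coalesced());	/* 1 */
	rtc_eoi(1);
	printf("coalesced after all EOIs: %d\n", rtc_coalesced());	/* 0 */
	return 0;
}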
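
And a similarly stand-alone sketch of the call-site shape requested in
the review comment, with the irq == RTC_GSI && line_status guard hoisted
out of rtc_irq_check_coalesced() and into the caller. The RTC_GSI value,
the ioapic_model struct and set_irq() are stand-ins for the real
kvm_ioapic_set_irq() path, kept just large enough to compile and show the
control flow.

/*
 * Shape of the call site after moving the RTC_GSI/line_status guard out
 * of rtc_irq_check_coalesced().  Everything here is a stub used to show
 * the control flow; it is not the real kernel function.
 */
#include <stdbool.h>
#include <stdio.h>

#define RTC_GSI 8	/* stand-in value */

struct ioapic_model {
	int pending_eoi;	/* models rtc_status.pending_eoi */
};

/* The helper now only inspects the EOI bookkeeping... */
static bool rtc_irq_check_coalesced(struct ioapic_model *ioapic,
				    int irq, bool line_status)
{
	(void)irq;		/* checked by the caller now */
	(void)line_status;

	return ioapic->pending_eoi > 0;
}

/* ...and the caller supplies the "level-triggered RTC injection" test. */
static int set_irq(struct ioapic_model *ioapic, int irq, bool line_status)
{
	if (irq == RTC_GSI && line_status &&
	    rtc_irq_check_coalesced(ioapic, irq, line_status))
		return 0;	/* report coalesced, skip the IRR update */

	/* ... normal injection path would follow here ... */
	return 1;
}

int main(void)
{
	struct ioapic_model ioapic = { .pending_eoi = 1 };

	printf("%d\n", set_irq(&ioapic, RTC_GSI, true));	/* 0: coalesced */
	ioapic.pending_eoi = 0;
	printf("%d\n", set_irq(&ioapic, RTC_GSI, true));	/* 1: delivered */
	return 0;
}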