From: Julian Stecklina <jsteckli@xxxxxxxxx> ACK notifiers don't work with AMD AVIC when the PIT interrupt is delivered as an edge-triggered fixed interrupt via the IOAPIC. AMD processors cannot exit on EOI for these interrupts. The ACK notifiers do work when the interrupt is delivered via PIC as ExtINT, because the ACK comes as a PIO write that KVM sees. Change the PIT code to not rely on the ACK notifiers. The IRQ ACK notifier in the PIT emulation re-schedules pit->expired to reinject any pending PIT interrupt. This seems useless, because we can pulse the PIT interrupt even when the interrupt is not ACKed yet. This means any timer expiry when the interrupt was being handled by the guest will cause an interrupt to be injected automatically when the interrupt is ACKed. Reviewed-by: Filippo Sironi <sironi@xxxxxxxxx> Signed-off-by: Julian Stecklina <jsteckli@xxxxxxxxx> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx> --- arch/x86/kvm/i8254.c | 22 +--------------------- arch/x86/kvm/i8254.h | 2 -- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index af192895b1fc..a8f6eb0ac1a0 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -200,21 +200,6 @@ static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps) return container_of(ps, struct kvm_pit, pit_state); } -static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) -{ - struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, - irq_ack_notifier); - struct kvm_pit *pit = pit_state_to_pit(ps); - - atomic_set(&ps->irq_ack, 1); - /* irq_ack should be set before pending is read. Order accesses with * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work. 
- */ - smp_mb(); - if (atomic_dec_if_positive(&ps->pending) > 0) - kthread_queue_work(pit->worker, &pit->expired); -} - void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) { struct kvm_pit *pit = vcpu->kvm->arch.vpit; @@ -244,7 +229,7 @@ static void pit_do_work(struct kthread_work *work) int i; struct kvm_kpit_state *ps = &pit->pit_state; - if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0)) + if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->pending, 0)) return; kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false); @@ -284,7 +269,6 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) { atomic_set(&pit->pit_state.pending, 0); - atomic_set(&pit->pit_state.irq_ack, 1); } void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) @@ -298,10 +282,8 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) if (reinject) { /* The initial state is preserved while ps->reinject == 0. */ kvm_pit_reset_reinject(pit); - kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier); kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); } else { - kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier); kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); } @@ -679,8 +661,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); pit_state->timer.function = pit_timer_fn; - pit_state->irq_ack_notifier.gsi = 0; - pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; pit->mask_notifier.func = pit_mask_notifer; kvm_pit_reset(pit); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 394d9527da7e..4a9cfe00306f 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -34,8 +34,6 @@ struct kvm_kpit_state { struct mutex lock; atomic_t reinject; atomic_t pending; /* accumulated triggered timers */ - atomic_t irq_ack; - struct kvm_irq_ack_notifier irq_ack_notifier; }; struct kvm_pit { 
-- 2.17.1