When getting an IRQ from the local APIC, don't move the vector to the ISR
and don't update the PPR if the found vector is the vCPU's nested posted
interrupt notification vector, i.e. if the IRQ should trigger posted
interrupt processing in L2 instead of being delivered to L1.

For now, pass in -1 from all callers and defer passing the actual nested
notification vector to a separate patch, as more prep work is needed.
Functionally, this should be a glorified nop, i.e. no true functional
change intended.

Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/irq.c              |  6 +++---
 arch/x86/kvm/lapic.c            | 12 ++++++++++--
 arch/x86/kvm/lapic.h            |  2 +-
 arch/x86/kvm/vmx/nested.c       |  2 +-
 arch/x86/kvm/x86.c              |  2 +-
 6 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 950a03e0181e..b40703f05b27 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2251,7 +2251,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_extint(struct kvm_vcpu *v);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, int nested_pi_nv);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 3d7eb11d0e45..69d04d80f143 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -135,13 +135,13 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
 /*
  * Read pending interrupt vector and intack.
  */
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, int nested_pi_nv)
 {
         int vector = kvm_cpu_get_extint(v);
 
         if (vector != -1)
-                return vector;                          /* PIC */
+                return vector;                                  /* PIC */
 
-        return kvm_get_apic_interrupt(v);               /* APIC */
+        return kvm_get_apic_interrupt(v, nested_pi_nv);         /* APIC */
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a7172ba59ad2..c5c4473f50f6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2924,7 +2924,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
         }
 }
 
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, int nested_pi_nv)
 {
         int vector = kvm_apic_has_interrupt(vcpu);
         struct kvm_lapic *apic = vcpu->arch.apic;
@@ -2939,8 +2939,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
          * on exit" mode.  Then we cannot inject the interrupt via RVI,
          * because the process would deliver it through the IDT.
          */
-        apic_clear_irr(vector, apic);
+
+        /*
+         * If the vector is L2's posted interrupt notification vector, return
+         * without moving the vector to the ISR, as notification interrupts
+         * trigger processing in L2, i.e. aren't delivered to L1.
+         */
+        if (vector == nested_pi_nv)
+                return vector;
+
+        apic_clear_irr(vector, apic);
         if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
                 /*
                  * For auto-EOI interrupts, there might be another pending
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7ef8ae73e82d..c8ff3bd2ce2c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -89,7 +89,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, int nested_pi_nv);
 int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b042b70560f2..7e0a944088eb 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4294,7 +4294,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
                 if (nested_exit_intr_ack_set(vcpu)) {
                         int irq;
 
-                        irq = kvm_cpu_get_interrupt(vcpu);
+                        irq = kvm_cpu_get_interrupt(vcpu, -1);
                         if (WARN_ON_ONCE(irq < 0))
                                 goto no_vmexit;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index af6c8cf6a37a..4c14ea000e89 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10548,7 +10548,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
                 if (r < 0)
                         goto out;
                 if (r) {
-                        int irq = kvm_cpu_get_interrupt(vcpu);
+                        int irq = kvm_cpu_get_interrupt(vcpu, -1);
 
                         if (!WARN_ON_ONCE(irq == -1)) {
                                 kvm_queue_interrupt(vcpu, irq, false);
-- 
2.45.2.1089.g2a221341d9-goog
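
Not part of the patch itself, but to make the intended flow concrete: the
sketch below is a self-contained, plain-C toy model of the decision that
kvm_get_apic_interrupt() now makes.  A pending vector is either acknowledged
(moved from the IRR to the ISR) or, when it matches the nested posted
interrupt notification vector, handed back untouched so the caller can run
posted interrupt processing for L2 instead.  Every name in the sketch
(toy_apic, toy_get_interrupt, and so on) is made up for illustration; only
the early-return logic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

struct toy_apic {
        uint8_t irr[32];        /* 256 pending-interrupt bits, like the APIC IRR */
        uint8_t isr[32];        /* 256 in-service bits, like the APIC ISR */
};

static void toy_clear_irr(struct toy_apic *apic, int vector)
{
        apic->irr[vector / 8] &= (uint8_t)~(1u << (vector % 8));
}

static void toy_set_isr(struct toy_apic *apic, int vector)
{
        apic->isr[vector / 8] |= (uint8_t)(1u << (vector % 8));
}

/*
 * Toy stand-in for kvm_get_apic_interrupt(): if the pending vector is the
 * nested posted interrupt notification vector, return it without touching
 * the IRR/ISR so the caller can process posted interrupts for L2; otherwise
 * acknowledge it by moving it from the IRR to the ISR.
 */
static int toy_get_interrupt(struct toy_apic *apic, int vector, int nested_pi_nv)
{
        if (vector < 0)
                return -1;

        if (vector == nested_pi_nv)
                return vector;

        toy_clear_irr(apic, vector);
        toy_set_isr(apic, vector);
        return vector;
}

int main(void)
{
        struct toy_apic apic = { .irr = { [4] = 0x01 } };       /* vector 32 pending */

        /* All callers pass -1 for now, so vector 32 is simply acknowledged. */
        printf("acked vector:  %d\n", toy_get_interrupt(&apic, 32, -1));

        /*
         * Once a caller passes a real notification vector, the same IRQ is
         * handed back without being moved to the ISR.
         */
        printf("posted-int NV: %d\n", toy_get_interrupt(&apic, 32, 32));
        return 0;
}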