When L1 wants to send a posted-interrupt to another L1 CPU running L2, it sets the relevant bit in vmx->nested.pi_desc->pir and ON bit in vmx->nested.pi_desc->control. Then it attempts to send a notification-vector IPI to dest L1 CPU. This attempt to send IPI will exit to L0 which will reach vmx_deliver_nested_posted_interrupt() which does the following: 1. If dest L0 CPU is currently running guest (vcpu->mode == IN_GUEST_MODE), it sends a physical IPI of PI nested-notification-vector. 2. It sets KVM_REQ_EVENT in dest vCPU. This is done such that if dest L0 CPU exits from guest to host and therefore doesn't recognize physical IPI (or it wasn't sent), then KVM_REQ_EVENT will be consumed on next vmentry which will call vmx_check_nested_events() which should call (in theory) vmx_complete_nested_posted_interrupt(). That function should see vmx->nested.pi_desc->control ON bit is set and therefore "emulate" posted-interrupt delivery for L1 (sync PIR to IRR in L1 virtual-apic-page & update vmcs02 RVI). The above logic regarding nested-posted-interrupts contains multiple issues: A) Race-condition: On step (1) it is possible the sender will see vcpu->mode == IN_GUEST_MODE but before sending physical IPI, the dest CPU will exit to host. Therefore, physical IPI could be received at host, whose handler does nothing. In addition, assume that dest CPU passes the checks for pending kvm requests before sender sets KVM_REQ_EVENT. Therefore, dest CPU will resume L2 without evaluating nested-posted-interrupts. B) vmx_complete_nested_posted_interrupt() is not always called when needed. Even if dest CPU consumed KVM_REQ_EVENT, there is a bug that vmx_check_nested_events() could exit from L2 to L1 before calling vmx_complete_nested_posted_interrupt(). Therefore, on next resume of L1 into L2, nested-posted-interrupts won't be evaluated even though L0 resumes L2 (We may resume L2 without having KVM_REQ_EVENT set). 
This commit removes nested-posted-interrupts processing from check_nested_events() and instead makes sure to process nested-posted-interrupts on vmentry after interrupts disabled. Processing of nested-posted-interrupts is delegated to hardware by issuing a self-IPI of relevant notification-vector which will be delivered to CPU when CPU is in guest. * Bug (A) is solved by the fact that processing of nested-posted-interrupts is not dependent on KVM_REQ_EVENT and happens before every vmentry to L2. * Bug (B) is now trivially solved by processing nested-posted-interrupts before each vmentry to L2 guest. An alternative could have been to just call vmx_complete_nested_posted_interrupt() at this call-site. However, we have decided to go with this approach because: 1. It would require modifying vmx_complete_nested_posted_interrupt() to be able to work with interrupts disabled (not-trivial). 2. We prefer to avoid software-emulation of hardware behavior if it is possible. Fixes: 705699a13994 ("KVM: nVMX: Enable nested posted interrupt processing") Signed-off-by: Liran Alon <liran.alon@xxxxxxxxxx> Co-authored-by: Nikita Leshenko <nikita.leshchenko@xxxxxxxxxx> Reviewed-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx> Reviewed-by: Liam Merwick <liam.merwick@xxxxxxxxxx> Signed-off-by: Liam Merwick <liam.merwick@xxxxxxxxxx> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/lapic.c | 12 ++----- arch/x86/kvm/lapic.h | 1 - arch/x86/kvm/svm.c | 6 ++++ arch/x86/kvm/vmx.c | 70 ++++++++++++++++------------------------- arch/x86/kvm/x86.c | 13 ++++++-- 6 files changed, 47 insertions(+), 56 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 90c54d079bc1..4b8caff83dc7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -993,6 +993,7 @@ struct kvm_x86_ops { void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); void 
(*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); + void (*complete_nested_posted_interrupt)(struct kvm_vcpu *vcpu); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 924ac8ce9d50..faa8f296d7ad 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -364,8 +364,10 @@ static u8 count_vectors(void *bitmap) return count; } -bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr) +bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr) { + struct kvm_lapic *apic = vcpu->arch.apic; + void *regs = apic->regs; u32 i, vec; u32 pir_val, irr_val, prev_irr_val; int max_updated_irr; @@ -392,14 +394,6 @@ bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr) return ((max_updated_irr != -1) && (max_updated_irr == *max_irr)); } -EXPORT_SYMBOL_GPL(__kvm_apic_update_irr); - -bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - - return __kvm_apic_update_irr(pir, apic->regs, max_irr); -} EXPORT_SYMBOL_GPL(kvm_apic_update_irr); static inline int apic_search_irr(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 56c36014f7b7..0ace7cc57057 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -75,7 +75,6 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, unsigned int dest, int dest_mode); -bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr); bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr); void kvm_apic_update_ppr(struct kvm_vcpu *vcpu); int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, diff --git a/arch/x86/kvm/svm.c 
b/arch/x86/kvm/svm.c index cf51471d993b..183f7aec29ca 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4454,6 +4454,10 @@ static int svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) return -1; } +static void svm_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) +{ +} + /* Note: Currently only used by Hyper-V. */ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) { @@ -5587,6 +5591,8 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) .hwapic_irr_update = svm_hwapic_irr_update, .hwapic_isr_update = svm_hwapic_isr_update, .sync_pir_to_irr = svm_sync_pir_to_irr, + .complete_nested_posted_interrupt = + svm_complete_nested_posted_interrupt, .apicv_post_state_restore = avic_post_state_restore, .set_tss_addr = svm_set_tss_addr, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cb8c52fb0591..5f82cd9fc500 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -535,12 +535,6 @@ static bool pi_test_and_set_on(struct pi_desc *pi_desc) (unsigned long *)&pi_desc->control); } -static bool pi_test_and_clear_on(struct pi_desc *pi_desc) -{ - return test_and_clear_bit(POSTED_INTR_ON, - (unsigned long *)&pi_desc->control); -} - static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); @@ -5044,39 +5038,6 @@ static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) } } - -static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - int max_irr; - void *vapic_page; - u16 status; - - if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) - return; - - vmx->nested.pi_pending = false; - if (!pi_test_and_clear_on(vmx->nested.pi_desc)) - return; - - max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); - if (max_irr != 256) { - vapic_page = kmap(vmx->nested.virtual_apic_page); - __kvm_apic_update_irr(vmx->nested.pi_desc->pir, - vapic_page, &max_irr); - kunmap(vmx->nested.virtual_apic_page); - - 
status = vmcs_read16(GUEST_INTR_STATUS); - if ((u8)max_irr > ((u8)status & 0xff)) { - status &= ~0xff; - status |= (u8)max_irr; - vmcs_write16(GUEST_INTR_STATUS, status); - } - } - - nested_mark_vmcs12_pages_dirty(vcpu); -} - static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, bool nested) { @@ -5123,14 +5084,13 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { - /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; - kvm_make_request(KVM_REQ_EVENT, vcpu); + /* the PIR and ON have been set by L1. */ + kvm_vcpu_trigger_posted_interrupt(vcpu, true); return 0; } return -1; @@ -5162,6 +5122,24 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) kvm_vcpu_kick(vcpu); } +static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + WARN_ON(!vcpu->arch.apicv_active); + + if (WARN_ON(!is_guest_mode(vcpu)) || !vmx->nested.pi_pending + || !vmx->nested.pi_desc) + return; + + vmx->nested.pi_pending = false; + + if (pi_test_on(vmx->nested.pi_desc)) { + apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), + POSTED_INTR_NESTED_VECTOR); + } +} + /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. 
@@ -6730,6 +6708,10 @@ static void wakeup_handler(void) */ static void nested_posted_intr_handler(void) { + struct kvm_vcpu *vcpu = kvm_get_current_vcpu(); + + if (vcpu && is_guest_mode(vcpu)) + to_vmx(vcpu)->nested.pi_pending = true; } void vmx_enable_tdp(void) @@ -6830,6 +6812,7 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_apicv()) { enable_apicv = 0; kvm_x86_ops->sync_pir_to_irr = NULL; + kvm_x86_ops->complete_nested_posted_interrupt = NULL; } if (cpu_has_vmx_tsc_scaling()) { @@ -11155,7 +11138,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } - vmx_complete_nested_posted_interrupt(vcpu); return 0; } @@ -12157,6 +12139,8 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) .hwapic_isr_update = vmx_hwapic_isr_update, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, + .complete_nested_posted_interrupt = + vmx_complete_nested_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fc08f2cb7aa2..fa088951afc9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6984,11 +6984,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) smp_mb__after_srcu_read_unlock(); /* - * This handles the case where a posted interrupt was - * notified with kvm_vcpu_kick. + * In case guest got the posted-interrupt notification + * vector while running in host, we need to make sure + * it arrives to guest. + * For L1 posted-interrupts, we manually sync PIR to IRR. + * For L2 posted-interrupts, we send notification-vector + * again by self IPI such that it will now be received in guest. 
*/ - if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) + if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) { kvm_x86_ops->sync_pir_to_irr(vcpu); + if (is_guest_mode(vcpu)) + kvm_x86_ops->complete_nested_posted_interrupt(vcpu); + } if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || need_resched() || signal_pending(current)) { -- 1.9.1