Separate irqfd_wakeup_pollin/irqfd_wakeup_pollup from irqfd_wakeup, so
that the logic can be reused for MSI fastpath injection.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxxxxxxxx>
---
 virt/kvm/eventfd.c | 86 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 51 insertions(+), 35 deletions(-)

diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 46dbc0a7dfc1..c31d43b762db 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -180,6 +180,53 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic(
 	return -EWOULDBLOCK;
 }
 
+static int
+irqfd_wakeup_pollin(struct kvm_kernel_irqfd *irqfd)
+{
+	struct kvm *kvm = irqfd->kvm;
+	struct kvm_kernel_irq_routing_entry irq;
+	unsigned seq;
+	int idx, ret;
+
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	do {
+		seq = read_seqcount_begin(&irqfd->irq_entry_sc);
+		irq = irqfd->irq_entry;
+	} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
+	/* An event has been signaled, inject an interrupt */
+	ret = kvm_arch_set_irq_inatomic(&irq, kvm,
+					KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+					false);
+	srcu_read_unlock(&kvm->irq_srcu, idx);
+
+	return ret;
+}
+
+static int
+irqfd_wakeup_pollup(struct kvm_kernel_irqfd *irqfd)
+{
+	struct kvm *kvm = irqfd->kvm;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kvm->irqfds.lock, flags);
+
+	/*
+	 * We must check if someone deactivated the irqfd before
+	 * we could acquire the irqfds.lock since the item is
+	 * deactivated from the KVM side before it is unhooked from
+	 * the wait-queue. If it is already deactivated, we can
+	 * simply return knowing the other side will cleanup for us.
+	 * We cannot race against the irqfd going away since the
+	 * other side is required to acquire wqh->lock, which we hold
+	 */
+	if (irqfd_is_active(irqfd))
+		irqfd_deactivate(irqfd);
+
+	spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+
+	return 0;
+}
+
 /*
  * Called with wqh->lock held and interrupts disabled
  */
@@ -189,45 +236,14 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(wait, struct kvm_kernel_irqfd, wait);
 	unsigned long flags = (unsigned long)key;
-	struct kvm_kernel_irq_routing_entry irq;
-	struct kvm *kvm = irqfd->kvm;
-	unsigned seq;
-	int idx;
 
-	if (flags & POLLIN) {
-		idx = srcu_read_lock(&kvm->irq_srcu);
-		do {
-			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
-			irq = irqfd->irq_entry;
-		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
-		/* An event has been signaled, inject an interrupt */
-		if (kvm_arch_set_irq_inatomic(&irq, kvm,
-					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
-					      false) == -EWOULDBLOCK)
+	if (flags & POLLIN)
+		if (irqfd_wakeup_pollin(irqfd) == -EWOULDBLOCK)
 			schedule_work(&irqfd->inject);
-		srcu_read_unlock(&kvm->irq_srcu, idx);
-	}
 
-	if (flags & POLLHUP) {
+	if (flags & POLLHUP)
 		/* The eventfd is closing, detach from KVM */
-		unsigned long flags;
-
-		spin_lock_irqsave(&kvm->irqfds.lock, flags);
-
-		/*
-		 * We must check if someone deactivated the irqfd before
-		 * we could acquire the irqfds.lock since the item is
-		 * deactivated from the KVM side before it is unhooked from
-		 * the wait-queue. If it is already deactivated, we can
-		 * simply return knowing the other side will cleanup for us.
-		 * We cannot race against the irqfd going away since the
-		 * other side is required to acquire wqh->lock, which we hold
-		 */
-		if (irqfd_is_active(irqfd))
-			irqfd_deactivate(irqfd);
-
-		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
-	}
+		irqfd_wakeup_pollup(irqfd);
 
 	return 0;
 }
-- 
1.8.3.1
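
Not part of the patch -- a minimal sketch of how the split-out helper
could be reused, assuming a hypothetical fast-path caller (the name
msi_fastpath_inject and its call site are illustrative only, not
anything this patch adds):

	/*
	 * Hypothetical illustration: an MSI fast-path caller could invoke
	 * irqfd_wakeup_pollin() directly, trying the atomic injection
	 * first and falling back to the existing workqueue slow path when
	 * kvm_arch_set_irq_inatomic() cannot complete without sleeping.
	 */
	static void msi_fastpath_inject(struct kvm_kernel_irqfd *irqfd)
	{
		if (irqfd_wakeup_pollin(irqfd) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
	}

The -EWOULDBLOCK fallback mirrors what irqfd_wakeup() does above:
injection is attempted in atomic context first, and the work item is
scheduled only when the architecture code would have to sleep.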