Hi Reinette,

> @@ -409,33 +416,62 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
>  {
>  	struct pci_dev *pdev = vdev->pdev;
>  	struct vfio_pci_irq_ctx *ctx;
> +	struct msi_map msix_map = {};
> +	bool allow_dyn_alloc = false;
>  	struct eventfd_ctx *trigger;
> +	bool new_ctx = false;
>  	int irq, ret;
>  	u16 cmd;
>
> +	/* Only MSI-X allows dynamic allocation. */
> +	if (msix && pci_msix_can_alloc_dyn(vdev->pdev))
> +		allow_dyn_alloc = true;
> +
>  	ctx = vfio_irq_ctx_get(vdev, vector);
> -	if (!ctx)
> +	if (!ctx && !allow_dyn_alloc)
>  		return -EINVAL;
> +
>  	irq = pci_irq_vector(pdev, vector);
> +	/* Context and interrupt are always allocated together. */
> +	WARN_ON((ctx && irq == -EINVAL) || (!ctx && irq != -EINVAL));
>
> -	if (ctx->trigger) {
> +	if (ctx && ctx->trigger) {
>  		irq_bypass_unregister_producer(&ctx->producer);
>
>  		cmd = vfio_pci_memory_lock_and_enable(vdev);
>  		free_irq(irq, ctx->trigger);
> +		if (allow_dyn_alloc) {
> +			msix_map.index = vector;
> +			msix_map.virq = irq;
> +			pci_msix_free_irq(pdev, msix_map);
> +			irq = -EINVAL;
> +		}
>  		vfio_pci_memory_unlock_and_restore(vdev, cmd);
>  		kfree(ctx->name);
>  		eventfd_ctx_put(ctx->trigger);
>  		ctx->trigger = NULL;
> +		if (allow_dyn_alloc) {
> +			vfio_irq_ctx_free(vdev, ctx, vector);
> +			ctx = NULL;
> +		}
>  	}
>
>  	if (fd < 0)
>  		return 0;
>

While looking at this piece of code, one thought occurred to me: userspace
may call this twice with the same valid fd for a specific vector. On the
second call, this code frees everything in the "if (ctx && ctx->trigger)"
branch and then re-allocates it all again for the very same fd supplied by
userspace. Would it help to first compare the given fd against ctx->trigger,
to see whether the trigger has actually changed? (A rough sketch of what I
mean is at the bottom of this mail.)

Thanks,
Jing

> +	if (!ctx) {
> +		ctx = vfio_irq_ctx_alloc_single(vdev, vector);
> +		if (!ctx)
> +			return -ENOMEM;
> +		new_ctx = true;
> +	}
> +
>  	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
>  			      msix ? "x" : "", vector, pci_name(pdev));
> -	if (!ctx->name)
> -		return -ENOMEM;
> +	if (!ctx->name) {
> +		ret = -ENOMEM;
> +		goto out_free_ctx;
> +	}
>
>  	trigger = eventfd_ctx_fdget(fd);
>  	if (IS_ERR(trigger)) {
> @@ -443,25 +479,38 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
>  		goto out_free_name;
>  	}
>
> -	/*
> -	 * The MSIx vector table resides in device memory which may be cleared
> -	 * via backdoor resets. We don't allow direct access to the vector
> -	 * table so even if a userspace driver attempts to save/restore around
> -	 * such a reset it would be unsuccessful. To avoid this, restore the
> -	 * cached value of the message prior to enabling.
> -	 */
>  	cmd = vfio_pci_memory_lock_and_enable(vdev);
>  	if (msix) {
> -		struct msi_msg msg;
> -
> -		get_cached_msi_msg(irq, &msg);
> -		pci_write_msi_msg(irq, &msg);
> +		if (irq == -EINVAL) {
> +			msix_map = pci_msix_alloc_irq_at(pdev, vector, NULL);
> +			if (msix_map.index < 0) {
> +				vfio_pci_memory_unlock_and_restore(vdev, cmd);
> +				ret = msix_map.index;
> +				goto out_put_eventfd_ctx;
> +			}
> +			irq = msix_map.virq;
> +		} else {
> +			/*
> +			 * The MSIx vector table resides in device memory which
> +			 * may be cleared via backdoor resets. We don't allow
> +			 * direct access to the vector table so even if a
> +			 * userspace driver attempts to save/restore around
> +			 * such a reset it would be unsuccessful. To avoid
> +			 * this, restore the cached value of the message prior
> +			 * to enabling.
> +			 */
> +			struct msi_msg msg;
> +
> +			get_cached_msi_msg(irq, &msg);
> +			pci_write_msi_msg(irq, &msg);
> +		}
>  	}
>
>  	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
> -	vfio_pci_memory_unlock_and_restore(vdev, cmd);
>  	if (ret)
> -		goto out_put_eventfd_ctx;
> +		goto out_free_irq_locked;
> +
> +	vfio_pci_memory_unlock_and_restore(vdev, cmd);
>
>  	ctx->producer.token = trigger;
>  	ctx->producer.irq = irq;
> @@ -477,11 +526,21 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
>
>  	return 0;
>
> +out_free_irq_locked:
> +	if (allow_dyn_alloc && new_ctx) {
> +		msix_map.index = vector;
> +		msix_map.virq = irq;
> +		pci_msix_free_irq(pdev, msix_map);
> +	}
> +	vfio_pci_memory_unlock_and_restore(vdev, cmd);
>  out_put_eventfd_ctx:
>  	eventfd_ctx_put(trigger);
>  out_free_name:
>  	kfree(ctx->name);
>  	ctx->name = NULL;
> +out_free_ctx:
> +	if (allow_dyn_alloc && new_ctx)
> +		vfio_irq_ctx_free(vdev, ctx, vector);
>  	return ret;
>  }
>
> --
> 2.34.1
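P.S. To make the suggestion above concrete, here is roughly what I had in
mind, placed before the teardown block. This is only an untested sketch of
the idea; the early return and the eventfd_ctx pointer comparison are my own
assumptions, not something taken from this patch:

	if (ctx && ctx->trigger && fd >= 0) {
		struct eventfd_ctx *new_trigger = eventfd_ctx_fdget(fd);

		/* Compare the incoming fd's eventfd with the current trigger. */
		if (!IS_ERR(new_trigger)) {
			bool unchanged = (new_trigger == ctx->trigger);

			eventfd_ctx_put(new_trigger);
			if (unchanged)
				return 0;	/* same trigger, keep existing setup */
		}
	}

If the fd still resolves to the eventfd that is already wired up for this
vector, nothing would need to be torn down and re-allocated.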