Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@xxxxxxxxxx wrote:
>On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
>> From: Jiri Pirko <jiri@xxxxxxxxxx>
>>
>> Currently, the code waits in a busy loop on every admin virtqueue issued
>> command to get a reply. That prevents callers from issuing multiple
>> commands in parallel.
>>
>> To overcome this limitation, introduce a virtqueue event callback for
>> the admin virtqueue. For every issued command, use a completion to
>> wait for the reply. In the event callback, trigger the completion for
>> every incoming reply.
>>
>> Alongside that, introduce a spinlock to protect the admin virtqueue
>> operations.
>>
>> Signed-off-by: Jiri Pirko <jiri@xxxxxxxxxx>
>> ---
>> v1->v2:
>> - rebased on top of newly added patches
>> - rebased on top of changes in previous patches (vq info, vqs[])
>> - removed WARN_ON_ONCE() when calling virtqueue_kick()
>> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
>> - added vp_modern_avq_cleanup() implementation to handle surprise
>>   removal case
>> ---
>>  drivers/virtio/virtio_pci_common.c | 13 ++++--
>>  drivers/virtio/virtio_pci_common.h |  3 ++
>>  drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
>>  include/linux/virtio.h             |  3 ++
>>  4 files changed, 77 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>> index 267643bb1cd5..c44d8ba00c02 100644
>> --- a/drivers/virtio/virtio_pci_common.c
>> +++ b/drivers/virtio/virtio_pci_common.c
>> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>>  			if (vqi->name && vqi->callback)
>>  				++nvectors;
>>  		}
>> +		if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
>> +			++nvectors;
>>  	} else {
>>  		/* Second best: one for change, shared for all vqs. */
>>  		nvectors = 2;
>> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>>  	if (!avq_num)
>>  		return 0;
>>  	sprintf(avq->name, "avq.%u", avq->vq_index);
>> -	vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
>> -				 true, &allocated_vectors, vector_policy,
>> -				 &vp_dev->admin_vq.info);
>> +	vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
>> +				 avq->name, false, true, &allocated_vectors,
>> +				 vector_policy, &vp_dev->admin_vq.info);
>>  	if (IS_ERR(vq)) {
>>  		err = PTR_ERR(vq);
>>  		goto error_find;
>> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>>  	if (!avq_num)
>>  		return 0;
>>  	sprintf(avq->name, "avq.%u", avq->vq_index);
>> -	vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
>> -			 VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
>> +	vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
>> +			 false, VIRTIO_MSI_NO_VECTOR,
>> +			 &vp_dev->admin_vq.info);
>>  	if (IS_ERR(vq)) {
>>  		err = PTR_ERR(vq);
>>  		goto out_del_vqs;
>> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> index de59bb06ec3c..90df381fbbcf 100644
>> --- a/drivers/virtio/virtio_pci_common.h
>> +++ b/drivers/virtio/virtio_pci_common.h
>> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
>>  	struct virtio_pci_vq_info *info;
>>  	/* serializing admin commands execution. */
>>  	struct mutex cmd_lock;
>> +	/* Protects virtqueue access. */
>> +	spinlock_t lock;
>>  	u64 supported_cmds;
>>  	/* Name of the admin queue: avq.$vq_index. */
>>  	char name[10];
>> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
>>  #define VIRTIO_ADMIN_CMD_BITMAP 0
>>  #endif
>>
>> +void vp_modern_avq_done(struct virtqueue *vq);
>>  int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>>  			     struct virtio_admin_cmd *cmd);
>>
>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>> index 0fd344d1eaf9..608df3263df1 100644
>> --- a/drivers/virtio/virtio_pci_modern.c
>> +++ b/drivers/virtio/virtio_pci_modern.c
>> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
>>  	return index == vp_dev->admin_vq.vq_index;
>>  }
>>
>> +void vp_modern_avq_done(struct virtqueue *vq)
>> +{
>> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>> +	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
>> +	struct virtio_admin_cmd *cmd;
>> +	unsigned long flags;
>> +	unsigned int len;
>> +
>> +	spin_lock_irqsave(&admin_vq->lock, flags);
>> +	do {
>> +		virtqueue_disable_cb(vq);
>> +		while ((cmd = virtqueue_get_buf(vq, &len)))
>> +			complete(&cmd->completion);
>> +	} while (!virtqueue_enable_cb(vq));
>> +	spin_unlock_irqrestore(&admin_vq->lock, flags);
>> +}
>> +
>>  static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>>  				    u16 opcode,
>>  				    struct scatterlist **sgs,
>> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>>  				    struct virtio_admin_cmd *cmd)
>>  {
>>  	struct virtqueue *vq;
>> -	int ret, len;
>> +	unsigned long flags;
>> +	int ret;
>>
>>  	vq = admin_vq->info->vq;
>>  	if (!vq)
>> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>>  	    !((1ULL << opcode) & admin_vq->supported_cmds))
>>  		return -EOPNOTSUPP;
>>
>> -	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> -	if (ret < 0)
>> -		return -EIO;
>> +	init_completion(&cmd->completion);
>>
>> -	if (unlikely(!virtqueue_kick(vq)))
>> +again:
>> +	if (virtqueue_is_broken(vq))
>>  		return -EIO;
>>
>> -	while (!virtqueue_get_buf(vq, &len) &&
>> -	       !virtqueue_is_broken(vq))
>> -		cpu_relax();
>> +	spin_lock_irqsave(&admin_vq->lock, flags);
>> +	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> +	if (ret < 0) {
>> +		if (ret == -ENOSPC) {
>> +			spin_unlock_irqrestore(&admin_vq->lock, flags);
>> +			cpu_relax();
>> +			goto again;
>> +		}
>> +		goto unlock_err;
>> +	}
>> +	if (!virtqueue_kick(vq))
>> +		goto unlock_err;
>> +	spin_unlock_irqrestore(&admin_vq->lock, flags);
>>
>> -	if (virtqueue_is_broken(vq))
>> -		return -EIO;
>> +	wait_for_completion(&cmd->completion);
>>
>> -	return 0;
>> +	return cmd->ret;
>> +
>> +unlock_err:
>> +	spin_unlock_irqrestore(&admin_vq->lock, flags);
>> +	return -EIO;
>>  }
>>
>>  int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
>>  	virtio_pci_admin_cmd_list_init(vdev);
>>  }
>>
>> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
>> +{
>> +	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> +	struct virtio_admin_cmd *cmd;
>> +	struct virtqueue *vq;
>> +
>> +	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
>> +		return;
>> +
>> +	vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
>> +	if (!vq)
>> +		return;
>> +
>> +	while ((cmd = virtqueue_detach_unused_buf(vq))) {
>> +		cmd->ret = -EIO;
>> +		complete(&cmd->completion);
>> +	}
>> +}
>> +
>
>
>I think surprise removal is still broken with this.

Why do you think so?
I will drain the remaining buffers from the queue and complete them all.
What's missing?

>You need to add a callback and signal the completion.

Callback to what?

>
>--
>MST
>
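
For readers following the exchange above, here is a condensed, out-of-tree
sketch that puts the two completion paths from the quoted patch side by side:
the virtqueue callback (vp_modern_avq_done in the diff) completes commands
whose buffers the device has returned, while the cleanup path
(vp_modern_avq_cleanup) detaches buffers the device never used and fails them
with -EIO. The struct names and function signatures below are simplified
stand-ins, not the driver's actual types; whether these two paths are enough
to cover surprise removal is precisely the question left open in the thread.

/*
 * Simplified sketch only: the sketch_* types are hypothetical stand-ins,
 * but the virtqueue and completion calls are the ones used in the patch.
 */
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct sketch_admin_cmd {
	struct completion completion;	/* waiter sleeps on this */
	int ret;			/* result seen by the waiter */
};

struct sketch_admin_vq {
	struct virtqueue *vq;
	spinlock_t lock;		/* protects virtqueue access */
};

/* Interrupt path: complete every command the device has answered. */
static void sketch_avq_done(struct sketch_admin_vq *avq)
{
	struct sketch_admin_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&avq->lock, flags);
	do {
		virtqueue_disable_cb(avq->vq);
		while ((cmd = virtqueue_get_buf(avq->vq, &len)))
			complete(&cmd->completion);
	} while (!virtqueue_enable_cb(avq->vq));
	spin_unlock_irqrestore(&avq->lock, flags);
}

/* Teardown path: fail commands the device never consumed with -EIO. */
static void sketch_avq_cleanup(struct sketch_admin_vq *avq)
{
	struct sketch_admin_cmd *cmd;

	while ((cmd = virtqueue_detach_unused_buf(avq->vq))) {
		cmd->ret = -EIO;
		complete(&cmd->completion);
	}
}

Both paths wake waiters through the same per-command completion, which is what
lets virtqueue_exec_admin_cmd() in the patch sleep in wait_for_completion()
instead of busy-polling the queue.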