Re: [PATCH v11 30/40] virtio_pci: support VIRTIO_F_RING_RESET

On 2022/6/29 14:56, Xuan Zhuo wrote:
This patch implements virtio pci support for QUEUE RESET.

Performing a reset on a queue is divided into these steps:

  1. notify the device to reset the queue
  2. recycle the buffers already submitted
  3. reset the vring (possibly re-allocating it)
  4. map the vring to the device and enable the queue

This patch implements virtio_reset_vq() and virtio_enable_resetq() for the
PCI transport; a rough sketch of the overall flow follows below.
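
As a rough illustration of how the four steps above fit together, here is a
minimal sketch that drives a full reset cycle through the new
reset_vq/enable_reset_vq config ops added by this patch. The function name and
the recycle() callback are assumptions made for the sketch; this is not code
from the series, and real drivers would go through the core wrappers rather
than call the config ops directly.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Illustrative sketch only: performs steps 1, 2 and 4 above; step 3
 * (resetting and possibly re-allocating the vring) is left to the
 * caller/core.
 */
static int example_queue_reset_cycle(struct virtqueue *vq,
				     void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct virtio_device *vdev = vq->vdev;
	void *buf;
	int err;

	/* 1. notify the device to reset the queue */
	err = vdev->config->reset_vq(vq);
	if (err)
		return err;

	/* 2. recycle the buffers the driver had submitted to this queue */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		recycle(vq, buf);

	/* 3. the vring would be reset (and possibly re-allocated) here */

	/* 4. map the vring back to the device and re-enable the queue */
	return vdev->config->enable_reset_vq(vq);
}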

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
  drivers/virtio/virtio_pci_common.c | 12 +++-
  drivers/virtio/virtio_pci_modern.c | 96 ++++++++++++++++++++++++++++++
  drivers/virtio/virtio_ring.c       |  2 +
  include/linux/virtio.h             |  1 +
  4 files changed, 108 insertions(+), 3 deletions(-)

diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
  	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
  	unsigned long flags;

-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
+	/*
+	 * If the vq is still in reset, info->node is already off the
+	 * virtqueues list; skip the list_del() to avoid unexpected irqs.
+	 */
+	if (!vq->reset) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_del(&info->node);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	}

  	vp_dev->del_vq(info);
  	kfree(info);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 9041d9a41b7d..754e5e10386b 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
  	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
  			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
  		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
  }

  /* virtio config->finalize_features() implementation */
@@ -199,6 +202,95 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
  	return 0;
  }
+static int vp_modern_reset_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags;
+
+	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+		return -ENOENT;
+
+	vp_modern_set_queue_reset(mdev, vq->index);
+
+	info = vp_dev->vqs[vq->index];
+
+	/* delete vq from irq handler */
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	INIT_LIST_HEAD(&info->node);
+
+	/* If the vq has an exclusive irq, break the vq and then call
+	 * synchronize_irq() so that no further interrupts are delivered
+	 * and any pending one has completed before we proceed.
+	 *
+	 * We can't use disable_irq() since it conflicts with the affinity
+	 * managed IRQs used by some drivers, so this is done on top of the
+	 * IRQ hardening infrastructure.
+	 *
+	 * In the shared-interrupt case, the handler looks the vq up in
+	 * vp_dev->virtqueues. The list_del() above already removed it from
+	 * that list, so the callback can no longer be invoked for this vq
+	 * and there is no need to touch the interrupt here.
+	 */
+	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) {
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+		__virtqueue_break(vq);
+#endif
+		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+	}
+
+	vq->reset = true;
+
+	return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags, index;
+	int err;
+
+	if (!vq->reset)
+		return -EBUSY;
+
+	index = vq->index;
+	info = vp_dev->vqs[index];
+
+	if (vp_modern_get_queue_reset(mdev, index))
+		return -EBUSY;
+
+	if (vp_modern_get_queue_enable(mdev, index))
+		return -EBUSY;
+
+	err = vp_active_vq(vq, info->msix_vector);
+	if (err)
+		return err;
+
+	if (vq->callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+		__virtqueue_unbreak(vq);
+#endif
+
+	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+	vq->reset = false;
+
+	return 0;
+}
+
  static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
  {
  	return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -413,6 +505,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
  	.set_vq_affinity = vp_set_vq_affinity,
  	.get_vq_affinity = vp_get_vq_affinity,
  	.get_shm_region  = vp_get_shm_region,
+	.reset_vq	 = vp_modern_reset_vq,
+	.enable_reset_vq = vp_modern_enable_reset_vq,
  };

  static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -431,6 +525,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
  	.set_vq_affinity = vp_set_vq_affinity,
  	.get_vq_affinity = vp_get_vq_affinity,
  	.get_shm_region  = vp_get_shm_region,
+	.reset_vq	 = vp_modern_reset_vq,
+	.enable_reset_vq = vp_modern_enable_reset_vq,
  };

  /* the PCI probing function */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 7b02be7fce67..82b058e8ce34 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2008,6 +2008,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
  	vq->vq.vdev = vdev;
  	vq->vq.name = name;
  	vq->vq.index = index;
+	vq->vq.reset = false;
  	vq->we_own_ring = true;
  	vq->notify = notify;
  	vq->weak_barriers = weak_barriers;
@@ -2487,6 +2488,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
  	vq->vq.vdev = vdev;
  	vq->vq.name = name;
  	vq->vq.index = index;
+	vq->vq.reset = false;
  	vq->we_own_ring = false;
  	vq->notify = notify;
  	vq->weak_barriers = weak_barriers;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index dc474a0d48d1..88f21796b1c3 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -33,6 +33,7 @@ struct virtqueue {
  	unsigned int num_free;
  	unsigned int num_max;
  	void *priv;
+	bool reset;
  };


I wonder if it would be better to move the virtio_ring part into a separate patch.

Other than that, this looks good.

Thanks





