Re: [PATCH 4/5] synchronize irqs in the reset routine

On 4/26/2023 1:06 PM, Jason Wang wrote:

On 2023/4/1 04:48, Zhu Lingshan wrote:
This commit synchronizes the irqs of the virtqueues
and the config space in the reset routine.
To this end, ifcvf_stop_hw() and reset() are refactored as well.

Signed-off-by: Zhu Lingshan <lingshan.zhu@xxxxxxxxx>
---
  drivers/vdpa/ifcvf/ifcvf_base.c | 61 ++++++++++++++++++++++++++-------
  drivers/vdpa/ifcvf/ifcvf_main.c | 45 +++---------------------
  2 files changed, 54 insertions(+), 52 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 79e313c5e10e..49949aec20ef 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -170,12 +170,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 
 void ifcvf_reset(struct ifcvf_hw *hw)
 {
-    hw->config_cb.callback = NULL;
-    hw->config_cb.private = NULL;
-
     ifcvf_set_status(hw, 0);
-    /* flush set_status, make sure VF is stopped, reset */
-    ifcvf_get_status(hw);


If we don't flush or poll, how can we know the reset is done?

E.g. modern virtio-pci does:

        /* 0 status means a reset. */
        vp_modern_set_status(mdev, 0);
        /* After writing 0 to device_status, the driver MUST wait for a read of
         * device_status to return 0 before reinitializing the device.
         * This will flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any.
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);

Thanks, I can implement a similar get_status() here.
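Something like this, just a sketch for V2 reusing the existing
ifcvf_set_status()/ifcvf_get_status() helpers (the msleep() interval is
illustrative, not measured):

        void ifcvf_reset(struct ifcvf_hw *hw)
        {
                ifcvf_set_status(hw, 0);
                /* flush the status write and poll until the device
                 * has really completed the reset, as virtio-pci does
                 */
                while (ifcvf_get_status(hw))
                        msleep(1);
        }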

 }
 
 u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
@@ -368,20 +363,62 @@ void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
     vp_iowrite16(ready, &cfg->queue_enable);
 }
 
-static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+static void synchronize_per_vq_irq(struct ifcvf_hw *hw)
 {
-    u32 i;
+    u16 qid;
 
-    ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
-    for (i = 0; i < hw->nr_vring; i++) {
-        ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
+    for (qid = 0; qid < hw->nr_vring; qid++) {
+        if (hw->vring[qid].irq != -EINVAL)
+            synchronize_irq(hw->vring[qid].irq);
     }
 }
 
+static void synchronize_vqs_reused_irq(struct ifcvf_hw *hw)
+{
+    if (hw->vqs_reused_irq != -EINVAL)
+        synchronize_irq(hw->vqs_reused_irq);
+}
+
+static void synchronize_vq_irq(struct ifcvf_hw *hw)
+{
+    u8 status = hw->msix_vector_status;
+
+    if (status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+        synchronize_per_vq_irq(hw);
+    else
+        synchronize_vqs_reused_irq(hw);
+}


I wonder if we need to go with such a complicated approach; can we synchronize through the vectors like virtio-pci does?

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
?

I can record the number of msix_vectors and sync the irqs based on it in V2.
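Roughly like this (only a sketch: num_msix_vectors would be a new field
recorded when the irqs are requested, and pdev is assumed to be
reachable from struct ifcvf_hw):

        static void ifcvf_synchronize_irq(struct ifcvf_hw *vf)
        {
                u32 nvectors = vf->num_msix_vectors;    /* assumed new field */
                int i, irq;

                for (i = 0; i < nvectors; i++) {
                        irq = pci_irq_vector(vf->pdev, i);    /* vf->pdev assumed */
                        if (irq >= 0)
                                synchronize_irq(irq);
                }
        }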


+
+static void synchronize_config_irq(struct ifcvf_hw *hw)
+{
+    if (hw->config_irq != -EINVAL)
+        synchronize_irq(hw->config_irq);
+}
+
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
+{
+    u16 qid;
+
+    for (qid = 0; qid < hw->nr_vring; qid++) {
+        synchronize_vq_irq(hw);

Since the IRQ could be shared, this will result in extra complexity; e.g. an irq could be flushed multiple times?

No, not for this code path. E.g. if all the vqs share one irq, it will only be flushed once, in synchronize_vqs_reused_irq().
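To make that explicit, the synchronization could also be hoisted out of
the loop, e.g. (a sketch using this patch's helpers, not part of the
posted series):

        static void ifcvf_reset_vring(struct ifcvf_hw *hw)
        {
                u16 qid;

                /* flush in-flight handlers once, then tear down callbacks */
                synchronize_vq_irq(hw);

                for (qid = 0; qid < hw->nr_vring; qid++) {
                        hw->vring[qid].cb.callback = NULL;
                        hw->vring[qid].cb.private = NULL;
                        ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
                }
        }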

Thanks

Thanks


+        hw->vring[qid].cb.callback = NULL;
+        hw->vring[qid].cb.private = NULL;
+        ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
+    }
+}
+
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+    synchronize_config_irq(hw);
+    hw->config_cb.callback = NULL;
+    hw->config_cb.private = NULL;
+    ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
+}
+
 void ifcvf_stop_hw(struct ifcvf_hw *hw)
 {
-    ifcvf_hw_disable(hw);
-    ifcvf_reset(hw);
+    ifcvf_reset_vring(hw);
+    ifcvf_reset_config_handler(hw);
 }
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 968687159e44..15c6157ee841 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -346,33 +346,6 @@ static int ifcvf_request_irq(struct ifcvf_hw *vf)
     return 0;
 }
 
-static int ifcvf_stop_datapath(struct ifcvf_adapter *adapter)
-{
-    struct ifcvf_hw *vf = adapter->vf;
-    int i;
-
-    for (i = 0; i < vf->nr_vring; i++)
-        vf->vring[i].cb.callback = NULL;
-
-    ifcvf_stop_hw(vf);
-
-    return 0;
-}
-
-static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
-{
-    struct ifcvf_hw *vf = adapter->vf;
-    int i;
-
-    for (i = 0; i < vf->nr_vring; i++) {
-        vf->vring[i].last_avail_idx = 0;
-        vf->vring[i].cb.callback = NULL;
-        vf->vring[i].cb.private = NULL;
-    }
-
-    ifcvf_reset(vf);
-}
-
 static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
 {
     return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
@@ -462,23 +435,15 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 
 static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 {
-    struct ifcvf_adapter *adapter;
-    struct ifcvf_hw *vf;
-    u8 status_old;
-
-    vf  = vdpa_to_vf(vdpa_dev);
-    adapter = vdpa_to_adapter(vdpa_dev);
-    status_old = ifcvf_get_status(vf);
+    struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+    u8 status = ifcvf_get_status(vf);
 
-    if (status_old == 0)
-        return 0;
+    ifcvf_stop_hw(vf);
 
-    if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
-        ifcvf_stop_datapath(adapter);
+    if (status & VIRTIO_CONFIG_S_DRIVER_OK)
         ifcvf_free_irq(vf);
-    }
 
-    ifcvf_reset_vring(adapter);
+    ifcvf_reset(vf);
 
     return 0;
 }





