Re: [PATCH V2 14/20] vhost: multiqueue support

On 01/25/2013 06:35 PM, Jason Wang wrote:
> This patch lets vhost support multiqueue. The idea is simple: launch
> multiple vhost threads and let each of them process a subset of the
> device's virtqueues. After this change, each emulated device can have
> multiple vhost threads as its backend.
>
> To do this, a virtqueue index is introduced to record the first virtqueue
> that will be handled by this vhost_net device. Based on this and nvqs,
> vhost can calculate the relative indices needed to set up the vhost_net
> device.
>
> Since we may have many vhost/net devices for a single virtio-net device,
> the setting of guest notifiers is moved out of the starting/stopping of a
> specific vhost thread. vhost_net_{start|stop}() are renamed to
> vhost_net_{start|stop}_one(), and new vhost_net_{start|stop}() functions
> are introduced to configure the guest notifiers and start/stop all
> vhost/vhost_net devices.
>
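
For reference, the index translation this relies on boils down to the
following (a minimal sketch, not part of the patch; the field names mirror
the vhost_dev fields added below):

#include <assert.h>

/* Each vhost device owns a contiguous slice of the virtio device's
 * virtqueues, so a global virtqueue index is translated into a
 * device-relative one before it is handed to the vhost kernel interface. */
struct vhost_dev_sketch {
    int vq_index;   /* first virtio virtqueue owned by this vhost device */
    int nvqs;       /* number of virtqueues it owns (2 for vhost_net) */
};

static int vhost_relative_index(const struct vhost_dev_sketch *dev, int idx)
{
    /* the global index must fall inside this device's slice */
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    return idx - dev->vq_index;
}
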
> Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
> ---
>  hw/vhost.c      |   82 +++++++++++++++++++++---------------------------
>  hw/vhost.h      |    2 +
>  hw/vhost_net.c  |   92 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
>  hw/vhost_net.h  |    6 ++-
>  hw/virtio-net.c |    4 +-
>  5 files changed, 128 insertions(+), 58 deletions(-)
>
> diff --git a/hw/vhost.c b/hw/vhost.c
> index cee8aad..38257b9 100644
> --- a/hw/vhost.c
> +++ b/hw/vhost.c
> @@ -619,14 +619,17 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>  {
>      hwaddr s, l, a;
>      int r;
> +    int vhost_vq_index = idx - dev->vq_index;
>      struct vhost_vring_file file = {
> -        .index = idx,
> +        .index = vhost_vq_index
>      };
>      struct vhost_vring_state state = {
> -        .index = idx,
> +        .index = vhost_vq_index
>      };
>      struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
>  
> +    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
> +
>      vq->num = state.num = virtio_queue_get_num(vdev, idx);
>      r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
>      if (r) {
> @@ -669,11 +672,12 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>          goto fail_alloc_ring;
>      }
>  
> -    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
> +    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
>      if (r < 0) {
>          r = -errno;
>          goto fail_alloc;
>      }
> +
>      file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
>      r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
>      if (r) {
> @@ -709,9 +713,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
>                                      unsigned idx)
>  {
>      struct vhost_vring_state state = {
> -        .index = idx,
> +        .index = idx - dev->vq_index
>      };
>      int r;
> +    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
>      r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
>      if (r < 0) {
>          fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
> @@ -867,7 +872,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
>      }
>  
>      for (i = 0; i < hdev->nvqs; ++i) {
> -        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
> +        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
> +                                             hdev->vq_index + i,
> +                                             true);
>          if (r < 0) {
>              fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
>              goto fail_vq;
> @@ -877,7 +884,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
>      return 0;
>  fail_vq:
>      while (--i >= 0) {
> -        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
> +        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
> +                                             hdev->vq_index + i,
> +                                             false);
>          if (r < 0) {
>              fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
>              fflush(stderr);
> @@ -898,7 +907,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
>      int i, r;
>  
>      for (i = 0; i < hdev->nvqs; ++i) {
> -        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
> +        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
> +                                             hdev->vq_index + i,
> +                                             false);
>          if (r < 0) {
>              fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
>              fflush(stderr);
> @@ -912,8 +923,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
>   */
>  bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
>  {
> -    struct vhost_virtqueue *vq = hdev->vqs + n;
> +    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
>      assert(hdev->started);
> +    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
>      return event_notifier_test_and_clear(&vq->masked_notifier);
>  }
>  
> @@ -922,15 +934,16 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
>                           bool mask)
>  {
>      struct VirtQueue *vvq = virtio_get_queue(vdev, n);
> -    int r;
> +    int r, index = n - hdev->vq_index;
>  
>      assert(hdev->started);
> +    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
>  
>      struct vhost_vring_file file = {
> -        .index = n,
> +        .index = index
>      };
>      if (mask) {
> -        file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
> +        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
>      } else {
>          file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
>      }
> @@ -945,20 +958,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>  
>      hdev->started = true;
>  
> -    if (!vdev->binding->set_guest_notifiers) {
> -        fprintf(stderr, "binding does not support guest notifiers\n");
> -        r = -ENOSYS;
> -        goto fail;
> -    }
> -
> -    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
> -                                           hdev->nvqs,
> -                                           true);
> -    if (r < 0) {
> -        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
> -        goto fail_notifiers;
> -    }
> -
>      r = vhost_dev_set_features(hdev, hdev->log_enabled);
>      if (r < 0) {
>          goto fail_features;
> @@ -970,9 +969,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>      }
>      for (i = 0; i < hdev->nvqs; ++i) {
>          r = vhost_virtqueue_start(hdev,
> -                                 vdev,
> -                                 hdev->vqs + i,
> -                                 i);
> +                                  vdev,
> +                                  hdev->vqs + i,
> +                                  hdev->vq_index + i);
>          if (r < 0) {
>              goto fail_vq;
>          }
> @@ -995,15 +994,13 @@ fail_log:
>  fail_vq:
>      while (--i >= 0) {
>          vhost_virtqueue_stop(hdev,
> -                                vdev,
> -                                hdev->vqs + i,
> -                                i);
> +                             vdev,
> +                             hdev->vqs + i,
> +                             hdev->vq_index + i);
>      }
> +    i = hdev->nvqs;
>  fail_mem:
>  fail_features:
> -    vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
> -fail_notifiers:
> -fail:
>  
>      hdev->started = false;
>      return r;
> @@ -1012,29 +1009,22 @@ fail:
>  /* Host notifiers must be enabled at this point. */
>  void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
>  {
> -    int i, r;
> +    int i;
>  
>      for (i = 0; i < hdev->nvqs; ++i) {
>          vhost_virtqueue_stop(hdev,
> -                                vdev,
> -                                hdev->vqs + i,
> -                                i);
> +                             vdev,
> +                             hdev->vqs + i,
> +                             hdev->vq_index + i);
>      }
>      for (i = 0; i < hdev->n_mem_sections; ++i) {
>          vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
>                                  0, (hwaddr)~0x0ull);
>      }
> -    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
> -                                           hdev->nvqs,
> -                                           false);
> -    if (r < 0) {
> -        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
> -        fflush(stderr);
> -    }
> -    assert (r >= 0);
>  
>      hdev->started = false;
>      g_free(hdev->log);
>      hdev->log = NULL;
>      hdev->log_size = 0;
>  }
> +
> diff --git a/hw/vhost.h b/hw/vhost.h
> index 44c61a5..f062d48 100644
> --- a/hw/vhost.h
> +++ b/hw/vhost.h
> @@ -35,6 +35,8 @@ struct vhost_dev {
>      MemoryRegionSection *mem_sections;
>      struct vhost_virtqueue *vqs;
>      int nvqs;
> +    /* the first virtqueue which would be used by this vhost dev */
> +    int vq_index;
>      unsigned long long features;
>      unsigned long long acked_features;
>      unsigned long long backend_features;
> diff --git a/hw/vhost_net.c b/hw/vhost_net.c
> index d3a04ca..c955611 100644
> --- a/hw/vhost_net.c
> +++ b/hw/vhost_net.c
> @@ -140,12 +140,21 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
>      return vhost_dev_query(&net->dev, dev);
>  }
>  
> -int vhost_net_start(struct vhost_net *net,
> -                    VirtIODevice *dev)
> +static int vhost_net_start_one(struct vhost_net *net,
> +                               VirtIODevice *dev,
> +                               int vq_index)
>  {
>      struct vhost_vring_file file = { };
>      int r;
>  
> +    if (net->dev.started) {
> +        return 0;
> +    }
> +
> +    net->dev.nvqs = 2;
> +    net->dev.vqs = net->vqs;
> +    net->dev.vq_index = vq_index;
> +
>      r = vhost_dev_enable_notifiers(&net->dev, dev);
>      if (r < 0) {
>          goto fail_notifiers;
> @@ -181,11 +190,15 @@ fail_notifiers:
>      return r;
>  }
>  
> -void vhost_net_stop(struct vhost_net *net,
> -                    VirtIODevice *dev)
> +static void vhost_net_stop_one(struct vhost_net *net,
> +                               VirtIODevice *dev)
>  {
>      struct vhost_vring_file file = { .fd = -1 };
>  
> +    if (!net->dev.started) {
> +        return;
> +    }
> +
>      for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
>          int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
>          assert(r >= 0);
> @@ -195,6 +208,65 @@ void vhost_net_stop(struct vhost_net *net,
>      vhost_dev_disable_notifiers(&net->dev, dev);
>  }
>  
> +int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> +                    int start_queues, int total_queues)
> +{
> +    int r, i = 0;
> +
> +    if (!dev->binding->set_guest_notifiers) {
> +        error_report("binding does not support guest notifiers\n");
> +        r = -ENOSYS;
> +        goto err;
> +    }
> +
> +    for (i = start_queues; i < total_queues; i++) {
> +        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
> +    }
> +

Since the kernel will support polling/writing while detached, there's no
need to stop the vhost threads that are polling the disabled queues here.
This can further simplify the interface between virtio-net and vhost.

Will send a new version.
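
Roughly, the simplified start path might then look like the sketch below
(just an illustration, not the actual next version; it assumes the kernel
side stays usable for queues the guest never enables, and it reuses the
helpers introduced in this patch):

/* Start vhost on every queue pair and wire up the guest notifiers in one
 * go; no up-front stopping of disabled queues. */
static int vhost_net_start_sketch(VirtIODevice *dev, NetClientState *ncs,
                                  int total_queues)
{
    int r, i;

    for (i = 0; i < total_queues; i++) {
        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
        if (r < 0) {
            goto err;
        }
    }

    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
                                          total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    return 0;

err:
    while (--i >= 0) {
        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
    }
    return r;
}
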
> +    for (i = 0; i < start_queues; i++) {
> +        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
> +
> +        if (r < 0) {
> +            goto err;
> +        }
> +    }
> +
> +    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
> +                                          start_queues * 2,
> +                                          true);
> +    if (r < 0) {
> +        error_report("Error binding guest notifier: %d\n", -r);
> +        goto err;
> +    }
> +
> +    return 0;
> +
> +err:
> +    while (--i >= 0) {
> +        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
> +    }
> +    return r;
> +}
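
The i * 2 passed to vhost_net_start_one() reflects the virtio-net
convention that queue pair i owns two consecutive virtqueues; a tiny
illustration (not from the patch):

/* queue pair i -> RX virtqueue 2*i, TX virtqueue 2*i + 1; the control
 * virtqueue, when negotiated, sits after all of the data queues */
static inline int rx_virtqueue_of_pair(int pair) { return 2 * pair; }
static inline int tx_virtqueue_of_pair(int pair) { return 2 * pair + 1; }
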
> +
> +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
> +                    int start_queues, int total_queues)
> +{
> +    int i, r;
> +
> +    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
> +                                          start_queues * 2,
> +                                          false);
> +    if (r < 0) {
> +        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
> +        fflush(stderr);
> +    }
> +    assert(r >= 0);
> +
> +    for (i = 0; i < total_queues; i++) {
> +        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
> +    }
> +}
> +
>  void vhost_net_cleanup(struct vhost_net *net)
>  {
>      vhost_dev_cleanup(&net->dev);
> @@ -224,13 +296,17 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
>      return false;
>  }
>  
> -int vhost_net_start(struct vhost_net *net,
> -		    VirtIODevice *dev)
> +int vhost_net_start(VirtIODevice *dev,
> +                    NetClientState *ncs,
> +                    int start_queues,
> +                    int total_queues)
>  {
>      return -ENOSYS;
>  }
> -void vhost_net_stop(struct vhost_net *net,
> -		    VirtIODevice *dev)
> +void vhost_net_stop(VirtIODevice *dev,
> +                    NetClientState *ncs,
> +                    int start_queues,
> +                    int total_queues)
>  {
>  }
>  
> diff --git a/hw/vhost_net.h b/hw/vhost_net.h
> index 88912b8..9fbd79d 100644
> --- a/hw/vhost_net.h
> +++ b/hw/vhost_net.h
> @@ -9,8 +9,10 @@ typedef struct vhost_net VHostNetState;
>  VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);
>  
>  bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
> -int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
> -void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);
> +int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> +                    int start_queues, int total_queues);
> +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
> +                    int start_queues, int total_queues);
>  
>  void vhost_net_cleanup(VHostNetState *net);
>  
> diff --git a/hw/virtio-net.c b/hw/virtio-net.c
> index 47f4ab4..2f49fd8 100644
> --- a/hw/virtio-net.c
> +++ b/hw/virtio-net.c
> @@ -129,14 +129,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
>              return;
>          }
>          n->vhost_started = 1;
> -        r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev);
> +        r = vhost_net_start(&n->vdev, nc, 1, 1);
>          if (r < 0) {
>              error_report("unable to start vhost net: %d: "
>                           "falling back on userspace virtio", -r);
>              n->vhost_started = 0;
>          }
>      } else {
> -        vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
> +        vhost_net_stop(&n->vdev, nc, 1, 1);
>          n->vhost_started = 0;
>      }
>  }
