[PATCH V5 12/12] vhost: allow worker attachment after initial setup

This patch allows userspace to change the vq-to-worker mapping while the vq
is in use, so tools can do this setup after device creation if needed.
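
For reference, a minimal userspace sketch of the flow this enables is
below. It is not part of the patch: the worker_id is assumed to come
from VHOST_NEW_WORKER, added earlier in this series, and the
vhost_vring_worker field names are inferred from how this patch uses
them.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Move an already running vq onto the worker identified by worker_id. */
static int attach_vq_worker(int vhost_fd, unsigned int vq_index,
			    unsigned int worker_id)
{
	struct vhost_vring_worker w = {
		.index = vq_index,
		.worker_id = worker_id,
	};

	/*
	 * With this patch the vq no longer has to be inactive, so the
	 * mapping can be changed after device setup, while the ring is
	 * processing requests.
	 */
	if (ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &w) < 0) {
		perror("VHOST_ATTACH_VRING_WORKER");
		return -1;
	}

	return 0;
}

Before this patch the same sequence would fail with -EBUSY once the vq
had its private_data set.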

Signed-off-by: Mike Christie <michael.christie@xxxxxxxxxx>
---
 drivers/vhost/vhost.c      | 76 +++++++++++++++++++++++++++-----------
 drivers/vhost/vhost.h      |  2 +-
 include/uapi/linux/vhost.h |  2 +-
 3 files changed, 57 insertions(+), 23 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1af43b5d1dbd..f702df0ce33f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -232,12 +232,9 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static void vhost_work_queue_on(struct vhost_worker *worker,
-				struct vhost_work *work)
+static void vhost_worker_work_queue(struct vhost_worker *worker,
+				    struct vhost_work *work)
 {
-	if (!worker)
-		return;
-
 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 		/* We can only add the work to the list after we're
 		 * sure it was not in the list.
@@ -248,21 +245,32 @@ static void vhost_work_queue_on(struct vhost_worker *worker,
 	}
 }
 
-static int vhost_workers_idr_flush_iter(int id, void *worker, void *dev)
+static void vhost_worker_flush(struct vhost_worker *worker)
 {
 	struct vhost_flush_struct flush;
 
 	init_completion(&flush.wait_event);
 	vhost_work_init(&flush.work, vhost_flush_work);
 
-	vhost_work_queue_on(worker, &flush.work);
+	vhost_worker_work_queue(worker, &flush.work);
 	wait_for_completion(&flush.wait_event);
+}
+
+static int vhost_workers_idr_flush_iter(int id, void *worker, void *dev)
+{
+	vhost_worker_flush(worker);
 	return 0;
 }
 
 void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
 {
-	vhost_work_queue_on(vq->worker, work);
+	struct vhost_worker *worker;
+
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker)
+		vhost_worker_work_queue(worker, work);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
 
@@ -282,7 +290,16 @@ EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
-	return vq->worker && !llist_empty(&vq->worker->work_list);
+	struct vhost_worker *worker;
+	bool has_work = false;
+
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker && !llist_empty(&worker->work_list))
+		has_work = true;
+	rcu_read_unlock();
+
+	return has_work;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_has_work);
 
@@ -507,7 +524,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 		vq->log = NULL;
 		vq->indirect = NULL;
 		vq->heads = NULL;
-		vq->worker = NULL;
+		rcu_assign_pointer(vq->worker, NULL);
 		vq->dev = dev;
 		mutex_init(&vq->mutex);
 		vhost_vq_reset(dev, vq);
@@ -587,11 +604,30 @@ static void vhost_worker_put(struct vhost_dev *dev, struct vhost_worker *worker)
 	kfree(worker);
 }
 
-static void vhost_vq_detach_worker(struct vhost_virtqueue *vq)
+static void vhost_vq_swap_worker(struct vhost_virtqueue *vq,
+				 struct vhost_worker *new_worker, bool flush)
 {
-	if (vq->worker)
-		vhost_worker_put(vq->dev, vq->worker);
-	vq->worker = NULL;
+	struct vhost_worker *old_worker;
+
+	old_worker = rcu_dereference_check(vq->worker,
+					   lockdep_is_held(&vq->dev->mutex));
+	rcu_assign_pointer(vq->worker, new_worker);
+
+	if (!old_worker)
+		return;
+
+	if (flush) {
+		/*
+		 * For dev cleanup we won't have work running, but for the
+		 * dynamic attach case we might, so make sure we see the new
+		 * worker and there is no work in the old worker when we
+		 * return.
+		 */
+		synchronize_rcu();
+		vhost_worker_flush(old_worker);
+	}
+
+	vhost_worker_put(vq->dev, old_worker);
 }
 
 static int vhost_workers_idr_iter(int id, void *worker, void *dev)
@@ -608,7 +644,7 @@ static void vhost_workers_free(struct vhost_dev *dev)
 		return;
 
 	for (i = 0; i < dev->nvqs; i++)
-		vhost_vq_detach_worker(dev->vqs[i]);
+		vhost_vq_swap_worker(dev->vqs[i], NULL, false);
 
 	idr_for_each(&dev->worker_idr, vhost_workers_idr_iter, dev);
 }
@@ -664,18 +700,13 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 	if (!dev->use_worker)
 		return -EINVAL;
 
-	/* We don't support setting a worker on an active vq */
-	if (vq->private_data)
-		return -EBUSY;
-
 	worker = idr_find(&dev->worker_idr, info->worker_id);
 	if (!worker)
 		return -ENODEV;
 
 	refcount_inc(&worker->refcount);
 
-	vhost_vq_detach_worker(vq);
-	vq->worker = worker;
+	vhost_vq_swap_worker(vq, worker, true);
 	return 0;
 }
 
@@ -1826,7 +1857,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			r = -EFAULT;
 			break;
 		}
+		/* We might flush the old worker, so we can't hold the vq mutex. */
+		mutex_unlock(&vq->mutex);
 		r = vhost_vq_attach_worker(vq, &w);
+		mutex_lock(&vq->mutex);
 		if (!r && copy_to_user(argp, &w, sizeof(w)))
 			r = -EFAULT;
 		break;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1738388fa02d..7903ac6bc92d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -81,7 +81,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
-	struct vhost_worker *worker;
+	struct vhost_worker __rcu *worker;
 
 	/* The actual ring of buffers. */
 	struct mutex mutex;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 117ea92b3925..e0221c8ce877 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -88,7 +88,7 @@
 #define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
 #define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
 /* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
- * virtqueues. This must be done before the virtqueue is active.
+ * virtqueues.
  */
 #define VHOST_ATTACH_VRING_WORKER _IOR(VHOST_VIRTIO, 0x15,		\
 				       struct vhost_vring_worker)
-- 
2.25.1