This patch has the core work queueing function take a worker for when we
support multiple workers. It also adds a helper that takes a vq during
queueing so modules can control which vq/worker to queue work on.

This temporarily leaves vhost_work_queue in place. It will be removed when
the drivers are converted in the next patches.

Signed-off-by: Mike Christie <michael.christie@xxxxxxxxxx>
---
 drivers/vhost/vhost.c | 44 ++++++++++++++++++++++++++++----------------
 drivers/vhost/vhost.h |  1 +
 2 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index aafb23e12477..611e495eeb3c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,21 +231,10 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-void vhost_dev_flush(struct vhost_dev *dev)
+static bool vhost_worker_queue(struct vhost_worker *worker,
+			       struct vhost_work *work)
 {
-	struct vhost_flush_struct flush;
-
-	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
-
-	if (vhost_work_queue(dev, &flush.work))
-		wait_for_completion(&flush.wait_event);
-}
-EXPORT_SYMBOL_GPL(vhost_dev_flush);
-
-bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
-{
-	if (!dev->worker)
+	if (!worker)
 		return false;
 	/*
 	 * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
@@ -257,14 +246,37 @@ bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 		 * sure it was not in the list.
 		 * test_and_set_bit() implies a memory barrier.
 		 */
-		llist_add(&work->node, &dev->worker->work_list);
-		vhost_task_wake(dev->worker->vtsk);
+		llist_add(&work->node, &worker->work_list);
+		vhost_task_wake(worker->vtsk);
 	}
 
 	return true;
 }
+
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+{
+	return vhost_worker_queue(dev->worker, work);
+}
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+	return vhost_worker_queue(vq->worker, work);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+	struct vhost_flush_struct flush;
+
+	init_completion(&flush.wait_event);
+	vhost_work_init(&flush.work, vhost_flush_work);
+
+	if (vhost_work_queue(dev, &flush.work))
+		wait_for_completion(&flush.wait_event);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d5728f986744..e2dd4558a1f9 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -198,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
 		      struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
 int vhost_vq_init_access(struct vhost_virtqueue *);
-- 
2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
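
For reference, a minimal sketch of how a converted driver could use the new
helper to queue work on the worker servicing a specific vq, rather than on
dev->worker via vhost_work_queue(). vhost_work_init() and
vhost_vq_work_queue() are the interfaces from vhost.h and this patch;
my_work_fn() and my_queue_on_vq() are hypothetical names used only for
illustration, not part of the patch:

/* Hypothetical example only; not from this patch. */
#include "vhost.h"

/* Work function run by the worker that services the chosen vq. */
static void my_work_fn(struct vhost_work *work)
{
	/* deferred driver work goes here */
}

static bool my_queue_on_vq(struct vhost_virtqueue *vq,
			   struct vhost_work *work)
{
	vhost_work_init(work, my_work_fn);

	/*
	 * Queues on vq->worker. Returns false if no worker has been
	 * attached yet (e.g. before VHOST_SET_OWNER), mirroring
	 * vhost_work_queue() when dev->worker is NULL.
	 */
	return vhost_vq_work_queue(vq, work);
}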