On 06/12/2018 04:05 PM, Michael S. Tsirkin wrote:
> On Tue, Jun 12, 2018 at 03:40:28PM -0400, Nitesh Narayan Lal wrote:
>>
>> On 06/11/2018 10:05 PM, Michael S. Tsirkin wrote:
>>> On Mon, Jun 11, 2018 at 11:18:59AM -0400, nilal@xxxxxxxxxx wrote:
>>>> From: Nitesh Narayan Lal <nilal@xxxxxxxxxx>
>>>>
>>>> This patch enables the driver to expose a chain of buffers to the
>>>> other end using vring descriptor followed by a kick. After which it
>>>> busy waits till the update is done.
>>>>
>>>> Signed-off-by: Nitesh Narayan Lal <nilal@xxxxxxxxxx>
>>>> ---
>>>>  drivers/virtio/virtio_ring.c | 157 ++++++++++++++++++++++++++++++++++++++++++-
>>>>  include/linux/virtio.h       |  19 ++++++
>>>>  2 files changed, 175 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
>>>> index 21d464a..586dd62 100644
>>>> --- a/drivers/virtio/virtio_ring.c
>>>> +++ b/drivers/virtio/virtio_ring.c
>>>> @@ -435,6 +435,136 @@ static inline int virtqueue_add(struct virtqueue *_vq,
>>>>  }
>>>>
>>>>  /**
>>>> + * virtqueue_add_chain - expose a chain of buffers to the other end
>>>> + * @_vq: the struct virtqueue we're talking about.
>>>> + * @head: desc id of the chain head.
>>>> + * @indirect: set if the chain of descs are indrect descs.
>>>> + * @indir_desc: the first indirect desc.
>>>> + * @data: the token identifying the chain.
>>>> + * @ctx: extra context for the token.
>>>> + *
>>>> + * Caller must ensure we don't call this with other virtqueue operations
>>>> + * at the same time (except where noted).
>>>> + *
>>>> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
>>>> + */
>>>> +int virtqueue_add_chain(struct virtqueue *_vq,
>>>> +			unsigned int head,
>>>> +			bool indirect,
>>>> +			struct vring_desc *indir_desc,
>>>> +			void *data,
>>>> +			void *ctx)
>>>> +{
>>>> +	struct vring_virtqueue *vq = to_vvq(_vq);
>>>> +
>>>> +	/* The desc chain is empty. */
>>>> +	if (head == VIRTQUEUE_DESC_ID_INIT)
>>>> +		return 0;
>>>> +
>>>> +	START_USE(vq);
>>>> +
>>>> +	if (unlikely(vq->broken)) {
>>>> +		END_USE(vq);
>>>> +		return -EIO;
>>>> +	}
>>>> +
>>>> +	/* This is the data for callback, in our case may not be required. */
>>>> +	vq->desc_state[head].data = data;
>>>> +	if (indirect)
>>>> +		vq->desc_state[head].indir_desc = indir_desc;
>>>> +	if (ctx)
>>>> +		vq->desc_state[head].indir_desc = ctx;
>>>> +
>>>> +	vq->avail_idx_shadow = 1;
>>>> +	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
>>>> +	vq->num_added = 1;
>>>> +	END_USE(vq);
>>>> +	virtqueue_kick_sync(_vq);
>>> I dislike the kick_sync being called internally.
>>> IMHO it's preferable to have callers invoke it
>>> as appropriate.
>> I can call virtqueue_kick_sync from
>> tell_host_one_page/tell_host_free_list function, will that make more sense?
> I don't see why we need a new API, just call kick + get_buf
> in a loop from tell_host.
I can move this logic to the caller function itself.
>
>
>>>
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(virtqueue_add_chain);
>>>> +
>>>> +/**
>>>> + * virtqueue_add_chain_desc - add a buffer to a chain using a vring desc
>>>> + * @vq: the struct virtqueue we're talking about.
>>>> + * @addr: address of the buffer to add.
>>>> + * @len: length of the buffer.
>>>> + * @head_id: desc id of the chain head.
>>>> + * @prev_id: desc id of the previous buffer.
>>>> + * @in: set if the buffer is for the device to write.
>>>> + *
>>>> + * Caller must ensure we don't call this with other virtqueue operations
>>>> + * at the same time (except where noted).
>>>> + *
>>>> + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
>>>> + */
>>>> +int virtqueue_add_chain_desc(struct virtqueue *_vq,
>>>> +			     u64 addr,
>>>> +			     u32 len,
>>>> +			     unsigned int *head_id,
>>>> +			     unsigned int *prev_id,
>>>> +			     bool in)
>>>> +{
>>>> +	struct vring_virtqueue *vq = to_vvq(_vq);
>>>> +	struct vring_desc *desc = vq->vring.desc;
>>>> +	u16 flags = in ? VRING_DESC_F_WRITE : 0;
>>>> +	unsigned int i;
>>>> +
>>>> +	/* Sanity check */
>>>> +	if (!_vq || !head_id || !prev_id)
>>>> +		return -EINVAL;
>>>> +retry:
>>>> +	START_USE(vq);
>>>> +	if (unlikely(vq->broken)) {
>>>> +		END_USE(vq);
>>>> +		return -EIO;
>>>> +	}
>>>> +
>>>> +	if (vq->vq.num_free < 1) {
>>>> +		/*
>>>> +		 * If there is no desc avail in the vq, so kick what is
>>>> +		 * already added, and re-start to build a new chain for
>>>> +		 * the passed sg.
>>>> +		 */
>>>> +		if (likely(*head_id != VIRTQUEUE_DESC_ID_INIT)) {
>>>> +			END_USE(vq);
>>>> +			virtqueue_add_chain(_vq, *head_id, 0, NULL, vq, NULL);
>>>> +			virtqueue_kick_sync(_vq);
>>>> +			*head_id = VIRTQUEUE_DESC_ID_INIT;
>>>> +			*prev_id = VIRTQUEUE_DESC_ID_INIT;
>>>> +			goto retry;
>>>> +		} else {
>>>> +			END_USE(vq);
>>>> +			return -ENOSPC;
>>>> +		}
>>>> +	}
>>>> +
>>>> +	i = vq->free_head;
>>>> +	flags &= ~VRING_DESC_F_NEXT;
>>>> +	desc[i].flags = cpu_to_virtio16(_vq->vdev, flags);
>>>> +	desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
>>>> +	desc[i].len = cpu_to_virtio32(_vq->vdev, len);
>>>> +
>>>> +	/* Add the desc to the end of the chain */
>>>> +	if (*prev_id != VIRTQUEUE_DESC_ID_INIT) {
>>>> +		desc[*prev_id].next = cpu_to_virtio16(_vq->vdev, i);
>>>> +		desc[*prev_id].flags |= cpu_to_virtio16(_vq->vdev,
>>>> +							VRING_DESC_F_NEXT);
>>>> +	}
>>>> +	*prev_id = i;
>>>> +	if (*head_id == VIRTQUEUE_DESC_ID_INIT)
>>>> +		*head_id = *prev_id;
>>>> +
>>>> +	vq->vq.num_free--;
>>>> +	vq->free_head = virtio16_to_cpu(_vq->vdev, desc[i].next);
>>>> +	END_USE(vq);
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(virtqueue_add_chain_desc);
>>>> +
>>>> +/**
>>>>  * virtqueue_add_sgs - expose buffers to other end
>>>>  * @vq: the struct virtqueue we're talking about.
>>>>  * @sgs: array of terminated scatterlists.
>>>> @@ -556,7 +686,6 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
>>>>  	START_USE(vq);
>>>>  	/* We need to expose available array entries before checking avail
>>>>  	 * event. */
>>>> -	virtio_mb(vq->weak_barriers);
>>>>
>>>>  	old = vq->avail_idx_shadow - vq->num_added;
>>>>  	new = vq->avail_idx_shadow;
>>>> @@ -606,6 +735,32 @@ bool virtqueue_notify(struct virtqueue *_vq)
>>>>  EXPORT_SYMBOL_GPL(virtqueue_notify);
>>>>
>>>>  /**
>>>> + * virtqueue_kick_sync - update after add_buf and busy wait till update is done
>>>> + * @vq: the struct virtqueue
>>>> + *
>>>> + * After one or more virtqueue_add_* calls, invoke this to kick
>>>> + * the other side. Busy wait till the other side is done with the update.
>>>> + *
>>>> + * Caller must ensure we don't call this with other virtqueue
>>>> + * operations at the same time (except where noted).
>>>> + *
>>>> + * Returns false if kick failed, otherwise true.
>>>> + */
>>>> +bool virtqueue_kick_sync(struct virtqueue *vq)
>>>> +{
>>>> +	u32 len;
>>>> +
>>>> +	if (likely(virtqueue_kick(vq))) {
>>>> +		while (!virtqueue_get_buf(vq, &len) &&
>>>> +		       !virtqueue_is_broken(vq))
>>>> +			cpu_relax();
>>>> +		return true;
>>>> +	}
>>>> +	return false;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(virtqueue_kick_sync);
>>>> +
>>>> +/**
>>>>  * virtqueue_kick - update after add_buf
>>>>  * @vq: the struct virtqueue
>>>>  *
>>>> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
>>>> index fa1b5da..0e33630 100644
>>>> --- a/include/linux/virtio.h
>>>> +++ b/include/linux/virtio.h
>>>> @@ -57,6 +57,25 @@ int virtqueue_add_sgs(struct virtqueue *vq,
>>>>  		      unsigned int in_sgs,
>>>>  		      void *data,
>>>>  		      gfp_t gfp);
>>>> +/* A desc with this init id is treated as an invalid desc */
>>>> +#define VIRTQUEUE_DESC_ID_INIT UINT_MAX
>>>> +int virtqueue_add_chain_desc(struct virtqueue *_vq,
>>>> +			     u64 addr,
>>>> +			     u32 len,
>>>> +			     unsigned int *head_id,
>>>> +			     unsigned int *prev_id,
>>>> +			     bool in);
>>>> +
>>>> +int virtqueue_add_chain(struct virtqueue *_vq,
>>>> +			unsigned int head,
>>>> +			bool indirect,
>>>> +			struct vring_desc *indirect_desc,
>>>> +			void *data,
>>>> +			void *ctx);
>>>> +
>>>> +bool virtqueue_kick_sync(struct virtqueue *vq);
>>>> +
>>>> +bool virtqueue_kick_async(struct virtqueue *vq, wait_queue_head_t wq);
>>>>
>>>>  bool virtqueue_kick(struct virtqueue *vq);
>>>>
>>>> --
>>>> 2.9.5
>> --
>> Regards
>> Nitesh
>>
>
>

--
Regards
Nitesh
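For reference, below is a minimal sketch of the caller-side pattern suggested in the review above: rather than adding a virtqueue_kick_sync() API to virtio_ring.c, the driver's tell_host path calls virtqueue_kick() and then polls virtqueue_get_buf() in a loop until the host has used the buffer, keeping the synchronous wait in the driver. The tell_host() signature, the struct virtio_balloon argument, and the single-entry scatterlist are assumptions for illustration only; the actual caller is not shown in this thread.

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq,
		      struct scatterlist *sg)
{
	unsigned int unused;

	/* Expose one buffer to the host; vb doubles as the token. */
	if (virtqueue_add_outbuf(vq, sg, 1, vb, GFP_KERNEL))
		return;

	/* Kick the host ... */
	if (!virtqueue_kick(vq))
		return;

	/* ... and busy wait until it has consumed the buffer. */
	while (!virtqueue_get_buf(vq, &unused) &&
	       !virtqueue_is_broken(vq))
		cpu_relax();
}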