DMA-related logic is separated from virtqueue_add_packed() to prepare
for the subsequent premapped support: the scatterlists are mapped up
front by virtqueue_map_sgs(), the descriptor-filling code reads the
addresses from sg->dma_address, and mapping failures are unwound at a
single point instead of in per-function unmap_release labels.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
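[Note for reviewers, kept below the "---" so it stays out of the
commit log: after this patch the add path runs in four steps. The
resulting virtqueue_add_packed() is condensed here from the diff
below, with step comments added; see the hunks for the literal code.]

static inline int virtqueue_add_packed(struct virtqueue *_vq,
                                       struct scatterlist *sgs[],
                                       unsigned int total_sg,
                                       unsigned int out_sgs,
                                       unsigned int in_sgs,
                                       void *data, void *ctx, gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct vring_packed_desc *desc;
        u16 id;
        int err;

        START_USE(vq);

        /* Step 1: check the vq state and, if the request qualifies,
         * allocate an indirect descriptor table. desc stays NULL for
         * the direct path.
         */
        err = virtqueue_add_packed_prepare(vq, total_sg, data, ctx,
                                           &desc, gfp);
        if (err)
                goto end;

        /* Step 2: all DMA mapping happens here, in one place; the
         * addresses land in sg->dma_address.
         */
        err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
        if (err)
                goto err;

        /* Step 3: fill the ring. These helpers no longer touch the
         * DMA API at all; they only read sg->dma_address.
         */
        id = vq->free_head;
        if (desc) {
                err = virtqueue_add_indirect_packed(vq, sgs, total_sg,
                                                    out_sgs, in_sgs, desc);
                if (err)
                        goto err;
        } else {
                virtqueue_add_packed_vring(vq, sgs, total_sg,
                                           out_sgs, in_sgs);
                vq->packed.desc_state[id].indir_desc = ctx;
        }

        vq->packed.desc_state[id].data = data;
        goto end;

        /* Step 4: a single unwind point replaces the two per-function
         * unmap_release labels.
         */
err:
        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
        kfree(desc);
end:
        END_USE(vq);
        return err;
}

The point of the split is that step 2 becomes skippable: once a caller
can hand in sgs whose dma_address is already filled (premapped), steps
1, 3 and 4 are reusable as-is.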
 drivers/virtio/virtio_ring.c | 150 ++++++++++++++++++-----------------
 1 file changed, 78 insertions(+), 72 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 42b1ff87518e..47b6f9152f9f 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1329,7 +1329,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 					 unsigned int total_sg,
 					 unsigned int out_sgs,
 					 unsigned int in_sgs,
-					 void *data,
 					 struct vring_packed_desc *desc)
 {
 	struct scatterlist *sg;
@@ -1345,14 +1344,9 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	for (n = 0; n < out_sgs + in_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-						DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			if (vring_mapping_error(vq, addr))
-				goto unmap_release;
-
 			desc[i].flags = cpu_to_le16(n < out_sgs ?
 						0 : VRING_DESC_F_WRITE);
-			desc[i].addr = cpu_to_le64(addr);
+			desc[i].addr = cpu_to_le64(sg->dma_address);
 			desc[i].len = cpu_to_le32(sg->length);
 			i++;
 		}
 	}
@@ -1363,7 +1357,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 				total_sg * sizeof(struct vring_packed_desc),
 				DMA_TO_DEVICE);
 	if (vring_mapping_error(vq, addr))
-		goto unmap_release;
+		return -ENOMEM;
 
 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
@@ -1404,53 +1398,30 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	/* Store token and indirect buffer state. */
 	vq->packed.desc_state[id].num = 1;
-	vq->packed.desc_state[id].data = data;
 	vq->packed.desc_state[id].indir_desc = desc;
 	vq->packed.desc_state[id].last = id;
 
 	vq->num_added += 1;
 
 	pr_debug("Added buffer head %i to %p\n", head, vq);
-	END_USE(vq);
 
 	return 0;
-
-unmap_release:
-	err_idx = i;
-
-	for (i = 0; i < err_idx; i++)
-		vring_unmap_desc_packed(vq, &desc[i]);
-
-	kfree(desc);
-
-	END_USE(vq);
-	return -ENOMEM;
 }
 
-static inline int virtqueue_add_packed(struct virtqueue *_vq,
-				       struct scatterlist *sgs[],
-				       unsigned int total_sg,
-				       unsigned int out_sgs,
-				       unsigned int in_sgs,
-				       void *data,
-				       void *ctx,
-				       gfp_t gfp)
+static inline int virtqueue_add_packed_prepare(struct vring_virtqueue *vq,
+					       unsigned int total_sg,
+					       void *data,
+					       void *ctx,
+					       struct vring_packed_desc **pdesc,
+					       gfp_t gfp)
 {
-	struct vring_virtqueue *vq = to_vvq(_vq);
 	struct vring_packed_desc *desc;
-	struct scatterlist *sg;
-	unsigned int i, n, c, descs_used, err_idx;
-	__le16 head_flags, flags;
-	u16 head, id, prev, curr, avail_used_flags;
-	int err;
-
-	START_USE(vq);
+	unsigned int descs_used;
 
 	BUG_ON(data == NULL);
 	BUG_ON(ctx && vq->indirect);
 
 	if (unlikely(vq->broken)) {
-		END_USE(vq);
 		return -EIO;
 	}
@@ -1458,39 +1429,55 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 
 	BUG_ON(total_sg == 0);
 
+	desc = NULL;
+
 	if (virtqueue_use_indirect(vq, total_sg)) {
 		desc = alloc_indirect_packed(total_sg, gfp);
 		if (desc) {
 			if (unlikely(vq->vq.num_free < 1)) {
 				pr_debug("Can't add buf len 1 - avail = 0\n");
 				kfree(desc);
-				END_USE(vq);
 				return -ENOSPC;
 			}
 
-			return virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
-							     in_sgs, data, desc);
+			return 0;
 		}
 
 		/* fall back on direct */
 	}
 
-	head = vq->packed.next_avail_idx;
-	avail_used_flags = vq->packed.avail_used_flags;
-
 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
 
-	desc = vq->packed.vring.desc;
-	i = head;
-
 	descs_used = total_sg;
 
 	if (unlikely(vq->vq.num_free < descs_used)) {
 		pr_debug("Can't add buf len %i - avail = %i\n",
 			 descs_used, vq->vq.num_free);
-		END_USE(vq);
 		return -ENOSPC;
 	}
 
+	*pdesc = desc;
+
+	return 0;
+}
+
+static void virtqueue_add_packed_vring(struct vring_virtqueue *vq,
+				       struct scatterlist *sgs[],
+				       unsigned int total_sg,
+				       unsigned int out_sgs,
+				       unsigned int in_sgs)
+{
+	struct vring_packed_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i, n, c, descs_used;
+	__le16 head_flags, flags;
+	u16 head, id, prev, curr;
+
+	desc = vq->packed.vring.desc;
+	head = vq->packed.next_avail_idx;
+	i = head;
+	descs_used = total_sg;
+
 	id = vq->free_head;
 	BUG_ON(id == vq->packed.vring.num);
@@ -1498,11 +1485,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	c = 0;
 	for (n = 0; n < out_sgs + in_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			if (vring_mapping_error(vq, addr))
-				goto unmap_release;
-
 			flags = cpu_to_le16(vq->packed.avail_used_flags |
 				(++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
 				(n < out_sgs ? 0 : VRING_DESC_F_WRITE));
@@ -1511,12 +1493,12 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 			else
 				desc[i].flags = flags;
 
-			desc[i].addr = cpu_to_le64(addr);
+			desc[i].addr = cpu_to_le64(sg->dma_address);
 			desc[i].len = cpu_to_le32(sg->length);
 			desc[i].id = cpu_to_le16(id);
 
 			if (unlikely(vq->use_dma_api)) {
-				vq->packed.desc_extra[curr].addr = addr;
+				vq->packed.desc_extra[curr].addr = sg->dma_address;
 				vq->packed.desc_extra[curr].len = sg->length;
 				vq->packed.desc_extra[curr].flags =
 					le16_to_cpu(flags);
@@ -1545,8 +1527,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 
 	/* Store token. */
 	vq->packed.desc_state[id].num = descs_used;
-	vq->packed.desc_state[id].data = data;
-	vq->packed.desc_state[id].indir_desc = ctx;
 	vq->packed.desc_state[id].last = prev;
 
 	/*
@@ -1559,29 +1539,55 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	vq->num_added += descs_used;
 
 	pr_debug("Added buffer head %i to %p\n", head, vq);
-	END_USE(vq);
+}
 
-	return 0;
+static inline int virtqueue_add_packed(struct virtqueue *_vq,
+				       struct scatterlist *sgs[],
+				       unsigned int total_sg,
+				       unsigned int out_sgs,
+				       unsigned int in_sgs,
+				       void *data,
+				       void *ctx,
+				       gfp_t gfp)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	struct vring_packed_desc *desc;
+	u16 id;
+	int err;
 
-unmap_release:
-	err_idx = i;
-	i = head;
-	curr = vq->free_head;
+	START_USE(vq);
 
-	vq->packed.avail_used_flags = avail_used_flags;
+	/* check vq state and try to alloc desc for indirect. */
+	err = virtqueue_add_packed_prepare(vq, total_sg, data, ctx, &desc, gfp);
+	if (err)
+		goto end;
 
-	for (n = 0; n < total_sg; n++) {
-		if (i == err_idx)
-			break;
-		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
-		curr = vq->packed.desc_extra[curr].next;
-		i++;
-		if (i >= vq->packed.vring.num)
-			i = 0;
+	err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+	if (err)
+		goto err;
+
+	id = vq->free_head;
+
+	if (desc) {
+		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, desc);
+		if (err)
+			goto err;
+	} else {
+		virtqueue_add_packed_vring(vq, sgs, total_sg, out_sgs, in_sgs);
+		vq->packed.desc_state[id].indir_desc = ctx;
 	}
 
+	vq->packed.desc_state[id].data = data;
+
+	goto end;
+
+err:
+	virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+	kfree(desc);
+
+end:
 	END_USE(vq);
-	return -EIO;
+	return err;
 }
 
 static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
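For context: virtqueue_map_sgs() and virtqueue_unmap_sgs() are added
by an earlier patch in this series and are not shown here. Judging
from the call sites above and from the mapping code this patch
removes, they behave roughly like the sketch below. This is an
illustrative reconstruction, not the code from that patch; in
particular, the "zero dma_address marks the failing entry" convention
is only this sketch's way of letting unmap stop at the end of the
mapped prefix.

/* Sketch: map every sg up front, stashing the result in
 * sg->dma_address, so the descriptor-filling code never has to call
 * the DMA API itself.
 */
static int virtqueue_map_sgs(struct vring_virtqueue *vq,
                             struct scatterlist *sgs[],
                             unsigned int total_sg,
                             unsigned int out_sgs,
                             unsigned int in_sgs)
{
        struct scatterlist *sg;
        unsigned int n;

        for (n = 0; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr = vring_map_one_sg(vq, sg,
                                        n < out_sgs ? DMA_TO_DEVICE
                                                    : DMA_FROM_DEVICE);

                        if (vring_mapping_error(vq, addr)) {
                                /* Assumption of this sketch: mark the
                                 * failing entry so the unmap side
                                 * knows where the mapped prefix ends.
                                 */
                                sg->dma_address = 0;
                                return -ENOMEM;
                        }

                        sg->dma_address = addr;
                }
        }

        return 0;
}

/* Sketch: undo the mappings made above. A no-op when the DMA API is
 * not in use, mirroring vring_map_one_sg(), which returns the
 * physical address without mapping anything in that case.
 */
static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs)
{
        struct scatterlist *sg;
        unsigned int n;

        if (!vq->use_dma_api)
                return;

        for (n = 0; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        /* Stop at the first entry that was never
                         * mapped (see the sketch convention above).
                         */
                        if (!sg->dma_address)
                                return;

                        dma_unmap_page(vring_dma_dev(vq),
                                       sg->dma_address, sg->length,
                                       n < out_sgs ? DMA_TO_DEVICE
                                                   : DMA_FROM_DEVICE);
                }
        }
}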
-- 
2.32.0.3.g01195cf9f