Re: [PATCH vhost v1 06/12] virtio_ring: packed: separate DMA codes

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Mar 2, 2023 at 7:59 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
>
> DMA-related logic is separated out of virtqueue_add_vring_packed() to
> prepare for the subsequent premapped support.
>
> DMA address will be saved as sg->dma_address, then
> virtqueue_add_vring_packed() will use it directly.
>
> In the premapped case, the submitted sgs should already carry the DMA
> address in sg->dma_address, so the virtio core can skip the mapping step
> otherwise done by virtqueue_map_sgs().
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> ---
>  drivers/virtio/virtio_ring.c | 59 ++++++++----------------------------
>  1 file changed, 12 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 6796cbee0207..81a9c0692fec 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -1350,7 +1350,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                                          struct vring_packed_desc *desc)
>  {
>         struct scatterlist *sg;
> -       unsigned int i, n, err_idx;
> +       unsigned int i, n;
>         u16 head, id;
>         dma_addr_t addr;
>
> @@ -1362,14 +1362,9 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>
>         for (n = 0; n < out_sgs + in_sgs; n++) {
>                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
> -                       addr = vring_map_one_sg(vq, sg, n < out_sgs ?
> -                                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
> -                       if (vring_mapping_error(vq, addr))
> -                               goto unmap_release;
> -
>                         desc[i].flags = cpu_to_le16(n < out_sgs ?
>                                                 0 : VRING_DESC_F_WRITE);
> -                       desc[i].addr = cpu_to_le64(addr);
> +                       desc[i].addr = cpu_to_le64(sg->dma_address);
>                         desc[i].len = cpu_to_le32(sg->length);
>                         i++;
>                 }
> @@ -1380,7 +1375,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                         total_sg * sizeof(struct vring_packed_desc),
>                         DMA_TO_DEVICE);
>         if (vring_mapping_error(vq, addr))
> -               goto unmap_release;
> +               return -ENOMEM;
>
>         vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
>         vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
> @@ -1429,16 +1424,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>         pr_debug("Added buffer head %i to %p\n", head, vq);
>
>         return 0;
> -
> -unmap_release:
> -       err_idx = i;
> -
> -       for (i = 0; i < err_idx; i++)
> -               vring_unmap_desc_packed(vq, &desc[i]);
> -
> -       kfree(desc);
> -
> -       return -ENOMEM;
>  }
>
>  static inline struct vring_packed_desc *virtqueue_get_desc_packed(struct vring_virtqueue *vq,
> @@ -1498,15 +1483,14 @@ static inline int virtqueue_add_vring_packed(struct vring_virtqueue *vq,
>  {
>         struct vring_packed_desc *desc;
>         struct scatterlist *sg;
> -       unsigned int i, n, c, descs_used, err_idx;
> +       unsigned int i, n, c, descs_used;
>         __le16 head_flags, flags;
> -       u16 head, id, prev, curr, avail_used_flags;
> +       u16 head, id, prev, curr;
>
>         desc = vq->packed.vring.desc;
>         head = vq->packed.next_avail_idx;
>         i = head;
>         descs_used = total_sg;
> -       avail_used_flags = vq->packed.avail_used_flags;
>
>         id = vq->free_head;
>         BUG_ON(id == vq->packed.vring.num);
> @@ -1515,11 +1499,6 @@ static inline int virtqueue_add_vring_packed(struct vring_virtqueue *vq,
>         c = 0;
>         for (n = 0; n < out_sgs + in_sgs; n++) {
>                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
> -                       dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
> -                                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
> -                       if (vring_mapping_error(vq, addr))
> -                               goto unmap_release;
> -
>                         flags = cpu_to_le16(vq->packed.avail_used_flags |
>                                     (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
>                                     (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
> @@ -1528,12 +1507,12 @@ static inline int virtqueue_add_vring_packed(struct vring_virtqueue *vq,
>                         else
>                                 desc[i].flags = flags;
>
> -                       desc[i].addr = cpu_to_le64(addr);
> +                       desc[i].addr = cpu_to_le64(sg->dma_address);
>                         desc[i].len = cpu_to_le32(sg->length);
>                         desc[i].id = cpu_to_le16(id);
>
>                         if (unlikely(vq->use_dma_api)) {
> -                               vq->packed.desc_extra[curr].addr = addr;
> +                               vq->packed.desc_extra[curr].addr = sg->dma_address;
>                                 vq->packed.desc_extra[curr].len = sg->length;
>                                 vq->packed.desc_extra[curr].flags =
>                                         le16_to_cpu(flags);
> @@ -1576,25 +1555,6 @@ static inline int virtqueue_add_vring_packed(struct vring_virtqueue *vq,
>         pr_debug("Added buffer head %i to %p\n", head, vq);
>
>         return 0;
> -
> -unmap_release:
> -       err_idx = i;
> -       i = head;
> -       curr = vq->free_head;
> -
> -       vq->packed.avail_used_flags = avail_used_flags;
> -
> -       for (n = 0; n < total_sg; n++) {
> -               if (i == err_idx)
> -                       break;
> -               vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
> -               curr = vq->packed.desc_extra[curr].next;
> -               i++;
> -               if (i >= vq->packed.vring.num)
> -                       i = 0;
> -       }
> -
> -       return -EIO;
>  }
>
>  static inline int virtqueue_add_packed(struct virtqueue *_vq,
> @@ -1621,6 +1581,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
>                 goto end;
>         }
>
> +       err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> +       if (err)
> +               goto err;
> +
>         id = vq->free_head;
>
>         if (desc) {
> @@ -1637,6 +1601,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
>         goto end;
>
>  err:
> +       virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);

Similar to the split case, if possible let's move this call into
virtqueue_add_vring_packed(), assuming virtqueue_add_vring_packed() and
virtqueue_add_indirect_packed() could be unified.

Thanks

>         kfree(desc);
>
>  end:

> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization




[Index of Archives]     [KVM Development]     [Libvirt Development]     [Libvirt Users]     [CentOS Virtualization]     [Netdev]     [Ethernet Bridging]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Bugtraq]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux Admin]     [Samba]

  Powered by Linux