Re: [PATCH vhost v2 04/12] virtio_ring: split: support premapped

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Wed, Mar 8, 2023 at 2:44 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
>
> virtio core only supports virtual addresses, dma is completed in virtio
> core.
>
> In some scenarios (such as the AF_XDP), the memory is allocated
> and DMA is completed in advance, so it is necessary for us to support

Should be "DMA mapping is completed".

> passing the DMA address to virtio core.
>
> Drives can use sg->dma_address to pass the mapped dma address to virtio
> core. If one sg->dma_address is used then all sgs must use sg->
> dma_address, otherwise all dma_address must be null.

"must be null when passing it to virtqueue_add_sgs()"?

>
> On the non-indirect path,

Should it be "direct desc path"?

> if dma_address is used, extra.addr will be
> set to DMA_MAPPING_ERROR. So when do unmap, we can pass it.

I don't get the meaning of "pass" here. Or do you mean
DMA_MAPPING_ERROR is a hint that the desc is mapped by the upper layer
instead of the virtio core?

>
> Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> ---
>  drivers/virtio/virtio_ring.c | 22 ++++++++++++++--------
>  1 file changed, 14 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 221ff54fe58b..61deaf0a4faf 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -457,6 +457,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
>                                  (flags & VRING_DESC_F_WRITE) ?
>                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
>         } else {
> +               if (extra[i].addr == DMA_MAPPING_ERROR)
> +                       goto out;
> +
>                 dma_unmap_page(vring_dma_dev(vq),
>                                extra[i].addr,
>                                extra[i].len,
> @@ -497,7 +500,8 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
>                                                     dma_addr_t addr,
>                                                     unsigned int len,
>                                                     u16 flags,
> -                                                   bool indirect)
> +                                                   bool indirect,
> +                                                   bool do_map)
>  {
>         struct vring_virtqueue *vring = to_vvq(vq);
>         struct vring_desc_extra *extra = vring->split.desc_extra;
> @@ -511,7 +515,7 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
>                 next = extra[i].next;
>                 desc[i].next = cpu_to_virtio16(vq->vdev, next);
>
> -               extra[i].addr = addr;
> +               extra[i].addr = do_map ? addr : DMA_MAPPING_ERROR;

Any reason we don't need to do the same for indirect descriptors?

Thanks

>                 extra[i].len = len;
>                 extra[i].flags = flags;
>         } else
> @@ -604,7 +608,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>         struct scatterlist *sg;
>         struct vring_desc *desc;
>         unsigned int i, n, avail, descs_used, prev;
> -       bool indirect;
> +       bool indirect, do_map;
>         int head;
>
>         START_USE(vq);
> @@ -657,7 +661,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                 return -ENOSPC;
>         }
>
> -       if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
> +       do_map = !sgs[0]->dma_address;
> +       if (do_map && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
>                 return -ENOMEM;
>
>         for (n = 0; n < out_sgs; n++) {
> @@ -670,7 +675,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                                                      sg->dma_address,
>                                                      sg->length,
>                                                      VRING_DESC_F_NEXT,
> -                                                    indirect);
> +                                                    indirect, do_map);
>                 }
>         }
>         for (; n < (out_sgs + in_sgs); n++) {
> @@ -684,7 +689,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                                                      sg->length,
>                                                      VRING_DESC_F_NEXT |
>                                                      VRING_DESC_F_WRITE,
> -                                                    indirect);
> +                                                    indirect, do_map);
>                 }
>         }
>         /* Last one doesn't continue. */
> @@ -705,7 +710,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                                          head, addr,
>                                          total_sg * sizeof(struct vring_desc),
>                                          VRING_DESC_F_INDIRECT,
> -                                        false);
> +                                        false, true);
>         }
>
>         /* We're using some buffers from the free list. */
> @@ -748,7 +753,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>         return 0;
>
>  unmap_release:
> -       virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> +       if (do_map)
> +               virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
>
>         if (indirect)
>                 kfree(desc);
> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization




[Index of Archives]     [KVM Development]     [Libvirt Development]     [Libvirt Users]     [CentOS Virtualization]     [Netdev]     [Ethernet Bridging]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Bugtraq]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux Admin]     [Samba]

  Powered by Linux