在 2022/1/7 下午2:33, Xuan Zhuo 写道:
virtqueue_add_split() currently only accepts virtual addresses; the DMA
mapping is performed inside virtqueue_add_split() itself.
In some scenarios (such as the AF_XDP scenario), the memory is allocated
and DMA is completed in advance, so it is necessary for us to support
passing the DMA address to virtqueue_add_split().
This patch stipulates that if sg->dma_address is not NULL, that address
is used directly as the DMA address, and this fact is recorded in
extra->flags so that the DMA unmap step can be skipped for such buffers.
extra->flags |= VRING_DESC_F_PREDMA;
I think we need another name other than the VRING_DESC_F prefix, since
that prefix is for the flags defined in the spec. Maybe VIRTIO_DESC_F_PREDMA.
Thanks
This relies on the previous patch: in the indirect scenario, an extra is
allocated at the same time as each desc.
Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
drivers/virtio/virtio_ring.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 7420741cb750..add8430d9678 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -66,6 +66,9 @@
#define LAST_ADD_TIME_INVALID(vq)
#endif
+/* This means the buffer dma is pre-alloc. Just used by vring_desc_extra */
+#define VRING_DESC_F_PREDMA (1 << 15)
+
struct vring_desc_extra {
dma_addr_t addr; /* Descriptor DMA addr. */
u32 len; /* Descriptor length. */
@@ -336,11 +339,19 @@ static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
return vq->vq.vdev->dev.parent;
}
+static inline bool sg_is_predma(struct scatterlist *sg)
+{
+ return !!sg->dma_address;
+}
+
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
+ if (sg_is_predma(sg))
+ return sg_dma_address(sg);
+
if (!vq->use_dma_api)
return (dma_addr_t)sg_phys(sg);
@@ -396,6 +407,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (flags & VRING_DESC_F_PREDMA)
+ goto out;
+
dma_unmap_page(vring_dma_dev(vq),
extra->addr,
extra->len,
@@ -441,7 +455,8 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
unsigned int i,
dma_addr_t addr,
unsigned int len,
- u16 flags)
+ u16 flags,
+ bool predma)
{
struct vring_virtqueue *vring = to_vvq(vq);
struct vring_desc_extra *extra;
@@ -468,6 +483,9 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
extra->len = len;
extra->flags = flags;
+ if (predma)
+ extra->flags |= VRING_DESC_F_PREDMA;
+
return next;
}
@@ -547,7 +565,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
* table since it use stream DMA mapping.
*/
i = virtqueue_add_desc_split(_vq, in, i, addr, sg->length,
- VRING_DESC_F_NEXT);
+ VRING_DESC_F_NEXT,
+ sg_is_predma(sg));
}
}
for (; n < (out_sgs + in_sgs); n++) {
@@ -563,7 +582,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
i = virtqueue_add_desc_split(_vq, in, i, addr,
sg->length,
VRING_DESC_F_NEXT |
- VRING_DESC_F_WRITE);
+ VRING_DESC_F_WRITE,
+ sg_is_predma(sg));
}
}
/* Last one doesn't continue. */
@@ -582,7 +602,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
virtqueue_add_desc_split(_vq, NULL, head, addr,
total_sg * sizeof(struct vring_desc),
- VRING_DESC_F_INDIRECT);
+ VRING_DESC_F_INDIRECT, false);
}
/* We're using some buffers from the free list. */
_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization