virtio core only supports virtual addresses, and DMA mapping is completed
inside the virtio core. In some scenarios (such as AF_XDP), the memory is
allocated and DMA-mapped in advance, so it is necessary to support passing
the pre-mapped DMA address to the virtio core.

Drivers can use sg->dma_address to pass the mapped DMA address to the
virtio core. If one sg->dma_address is used, then all sgs must use
sg->dma_address; otherwise, every dma_address must be NULL when passing
the sgs to the virtio APIs.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
 drivers/virtio/virtio_ring.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 4b6c6da4cdb4..0bc011e6e96e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1346,6 +1346,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	unsigned int i, n;
 	u16 head, id;
 	dma_addr_t addr;
+	bool dma_map_internal;
 
 	head = vq->packed.next_avail_idx;
 	desc = alloc_indirect_packed(total_sg, gfp);
@@ -1363,7 +1364,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	id = vq->free_head;
 	BUG_ON(id == vq->packed.vring.num);
 
-	if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
+	dma_map_internal = !sgs[0]->dma_address;
+	if (dma_map_internal && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
 		goto err_map;
 
 	for (n = 0; n < out_sgs + in_sgs; n++) {
@@ -1425,6 +1427,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	vq->packed.desc_state[id].data = data;
 	vq->packed.desc_state[id].indir_desc = desc;
 	vq->packed.desc_state[id].last = id;
+	vq->packed.desc_state[id].flags = dma_map_internal ? VRING_STATE_F_MAP_INTERNAL : 0;
+
 
 	vq->num_added += 1;
 
@@ -1434,7 +1438,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 	return 0;
 
 unmap_release:
-	virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+	if (dma_map_internal)
+		virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
 
 err_map:
 	kfree(desc);
@@ -1661,7 +1666,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	if (!desc)
 		return;
 
-	if (vq->use_dma_api) {
+	if (vq->use_dma_api && dma_map_internal) {
 		len = vq->packed.desc_extra[id].len;
 		for (i = 0; i < len / sizeof(struct vring_packed_desc);
 				i++)
-- 
2.32.0.3.g01195cf9f
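
For illustration only (not part of this patch): a minimal, hypothetical
driver-side sketch of the convention described in the commit message. The
helper name my_add_premapped_buf, the dma_dev parameter and the use of
virtqueue_add_outbuf() are assumptions for the example; which struct device
the driver must map against is its own responsibility. The only point shown
is that the driver performs the mapping itself and passes the result via
sg->dma_address, leaving it NULL when it wants the virtio core to map.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/*
 * Hypothetical example: submit a buffer whose DMA mapping was done by the
 * driver in advance.  A non-NULL sg->dma_address tells the virtio core to
 * skip its internal mapping; all sgs of one add call must follow the same
 * convention.
 */
static int my_add_premapped_buf(struct virtqueue *vq, struct device *dma_dev,
				void *buf, size_t len, gfp_t gfp)
{
	struct scatterlist sg;
	dma_addr_t addr;
	int err;

	/* The driver owns the mapping and must unmap it itself later. */
	addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	sg.dma_address = addr;	/* pre-mapped: virtio core will not map it */

	err = virtqueue_add_outbuf(vq, &sg, 1, buf, gfp);
	if (err)
		dma_unmap_single(dma_dev, addr, len, DMA_TO_DEVICE);

	return err;
}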