On Tue, Jan 30, 2024 at 7:42 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
>
> In the functions vring_unmap_one_split and
> vring_unmap_one_split_indirect,
> multiple checks are made whether unmap is performed and whether it is
> INDIRECT.
>
> These two functions are usually called in a loop, and we should put the
> check outside the loop.
>
> And we unmap the descs with VRING_DESC_F_INDIRECT on the same path with
> other descs, which makes things more complex. If we distinguish the
> descs with VRING_DESC_F_INDIRECT before unmap, things will be clearer.
>
> 1. only one desc of the desc table is used, we do not need the loop
> 2. the called unmap API is different from that of the other descs
> 3. vq->premapped does not need to be checked
> 4. vq->indirect does not need to be checked
> 5. the state->indir_desc must not be null
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> ---
> drivers/virtio/virtio_ring.c | 80 ++++++++++++++++++------------------
> 1 file changed, 39 insertions(+), 41 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index dd03bc5a81fe..2b41fdbce975 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -452,9 +452,6 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> {
> u16 flags;
>
> - if (!vring_need_unmap_buffer(vq))
> - return;
> -
> flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
>
> dma_unmap_page(vring_dma_dev(vq),
> @@ -472,27 +469,12 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
>
> flags = extra[i].flags;
>
> - if (flags & VRING_DESC_F_INDIRECT) {
> - if (!vq->use_dma_api)
> - goto out;
> -
> - dma_unmap_single(vring_dma_dev(vq),
> - extra[i].addr,
> - extra[i].len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - } else {
> - if (!vring_need_unmap_buffer(vq))
> - goto out;
> -
> - dma_unmap_page(vring_dma_dev(vq),
> - extra[i].addr,
> - extra[i].len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - }
> + dma_unmap_page(vring_dma_dev(vq),
> + extra[i].addr,
> + extra[i].len,
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
>
> -out:
> return extra[i].next;
> }
>
> @@ -660,7 +642,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> vq, desc, total_sg * sizeof(struct vring_desc),
> DMA_TO_DEVICE);
> if (vring_mapping_error(vq, addr)) {
> - if (vq->premapped)
> + if (!vring_need_unmap_buffer(vq))
> goto free_indirect;
>
> goto unmap_release;
> @@ -713,6 +695,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> return 0;
>
> unmap_release:
> +
> + WARN_ON(!vring_need_unmap_buffer(vq));
> +
> err_idx = i;
>
> if (indirect)
> @@ -774,34 +759,42 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> {
> unsigned int i, j;
> __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
> + u16 flags;
>
> /* Clear data ptr. */
> vq->split.desc_state[head].data = NULL;
> + flags = vq->split.desc_extra[head].flags;
>
> /* Put back on free list: unmap first-level descriptors and find end */
> i = head;
>
> - while (vq->split.vring.desc[i].flags & nextflag) {
> - vring_unmap_one_split(vq, i);
> - i = vq->split.desc_extra[i].next;
> - vq->vq.num_free++;
> - }
> -
> - vring_unmap_one_split(vq, i);
> - vq->split.desc_extra[i].next = vq->free_head;
> - vq->free_head = head;
> + if (!(flags & VRING_DESC_F_INDIRECT)) {

So during add we do:

    if (!indirect && vring_need_unmap_buffer(vq))
        vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= ~VRING_DESC_F_NEXT;

Then using flags here unconditionally does not seem reliable.
I posted a patch to store flags unconditionally at:

https://lore.kernel.org/all/20220224122655-mutt-send-email-mst@xxxxxxxxxx/

> + while (vq->split.vring.desc[i].flags & nextflag) {
> + if (vring_need_unmap_buffer(vq))
> + vring_unmap_one_split(vq, i);
> + i = vq->split.desc_extra[i].next;
> + vq->vq.num_free++;
> + }
>
> - /* Plus final descriptor */
> - vq->vq.num_free++;
> + if (vring_need_unmap_buffer(vq))
> + vring_unmap_one_split(vq, i);
>
> - if (vq->indirect) {
> + if (ctx)
> + *ctx = vq->split.desc_state[head].indir_desc;
> + } else {
> struct vring_desc *indir_desc =
> vq->split.desc_state[head].indir_desc;
> u32 len;
>
> - /* Free the indirect table, if any, now that it's unmapped. */
> - if (!indir_desc)
> - return;
> + if (vq->use_dma_api) {
> + struct vring_desc_extra *extra = vq->split.desc_extra;
> +
> + dma_unmap_single(vring_dma_dev(vq),
> + extra[i].addr,
> + extra[i].len,
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + }

Note that there's a following

    BUG_ON(!(vq->split.desc_extra[head].flags & VRING_DESC_F_INDIRECT));

which I think we can remove.

Thanks