To make the code readable, introduce vring_need_unmap_buffer() to
replace do_unmap.

   use_dma_api  premapped -> vring_need_unmap_buffer()
1. false        false        false
2. true         false        true
3. true         true         false

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
Acked-by: Jason Wang <jasowang@xxxxxxxxxx>
---
 drivers/virtio/virtio_ring.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 2a972752ff1b..df8eb0521aa0 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -175,11 +175,6 @@ struct vring_virtqueue {
 	/* Do DMA mapping by driver */
 	bool premapped;
 
-	/* Do unmap or not for desc. Just when premapped is False and
-	 * use_dma_api is true, this is true.
-	 */
-	bool do_unmap;
-
 	/* Head of free buffer list. */
 	unsigned int free_head;
 	/* Number we've added since last sync. */
@@ -297,6 +292,11 @@ static bool vring_use_dma_api(const struct virtio_device *vdev)
 	return false;
 }
 
+static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring)
+{
+	return vring->use_dma_api && !vring->premapped;
+}
+
 size_t virtio_max_dma_size(const struct virtio_device *vdev)
 {
 	size_t max_segment_size = SIZE_MAX;
@@ -445,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 {
 	u16 flags;
 
-	if (!vq->do_unmap)
+	if (!vring_need_unmap_buffer(vq))
 		return;
 
 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -475,7 +475,7 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
 				 (flags & VRING_DESC_F_WRITE) ?
 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
-		if (!vq->do_unmap)
+		if (!vring_need_unmap_buffer(vq))
 			goto out;
 
 		dma_unmap_page(vring_dma_dev(vq),
@@ -643,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	}
 	/* Last one doesn't continue. */
 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
-	if (!indirect && vq->do_unmap)
+	if (!indirect && vring_need_unmap_buffer(vq))
 		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
 			~VRING_DESC_F_NEXT;
 
@@ -802,7 +802,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-		if (vq->do_unmap) {
+		if (vring_need_unmap_buffer(vq)) {
 			for (j = 0; j < len / sizeof(struct vring_desc); j++)
 				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
 		}
@@ -1236,7 +1236,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
 				 (flags & VRING_DESC_F_WRITE) ?
 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
-		if (!vq->do_unmap)
+		if (!vring_need_unmap_buffer(vq))
 			return;
 
 		dma_unmap_page(vring_dma_dev(vq),
@@ -1251,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 {
 	u16 flags;
 
-	if (!vq->do_unmap)
+	if (!vring_need_unmap_buffer(vq))
 		return;
 
 	flags = le16_to_cpu(desc->flags);
@@ -1632,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 		if (!desc)
 			return;
 
-		if (vq->do_unmap) {
+		if (vring_need_unmap_buffer(vq)) {
 			len = vq->packed.desc_extra[id].len;
 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
 					i++)
@@ -2091,7 +2091,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	vq->dma_dev = dma_dev;
 	vq->use_dma_api = vring_use_dma_api(vdev);
 	vq->premapped = false;
-	vq->do_unmap = vq->use_dma_api;
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
 		!context;
@@ -2636,7 +2635,6 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	vq->dma_dev = dma_dev;
 	vq->use_dma_api = vring_use_dma_api(vdev);
 	vq->premapped = false;
-	vq->do_unmap = vq->use_dma_api;
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
 		!context;
@@ -2799,7 +2797,6 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 	}
 
 	vq->premapped = true;
-	vq->do_unmap = false;
 
 	END_USE(vq);
 
-- 
2.32.0.3.g01195cf9f
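
The new predicate reads only state that is fixed at setup time
(use_dma_api) or flipped once by virtqueue_set_dma_premapped()
(premapped), so the table in the commit message describes it
completely. As a minimal standalone sketch of that truth table, the
program below can be compiled in userspace; struct vring_state is a
hypothetical stand-in for the two fields, not the kernel's struct
vring_virtqueue:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in carrying just the two fields the
	 * predicate reads; not the kernel's struct vring_virtqueue.
	 */
	struct vring_state {
		bool use_dma_api;
		bool premapped;
	};

	/* Same logic as the patch's vring_need_unmap_buffer(). */
	static bool vring_need_unmap_buffer(const struct vring_state *vring)
	{
		return vring->use_dma_api && !vring->premapped;
	}

	int main(void)
	{
		/* The three rows of the commit message's table. */
		const struct vring_state rows[] = {
			{ .use_dma_api = false, .premapped = false },
			{ .use_dma_api = true,  .premapped = false },
			{ .use_dma_api = true,  .premapped = true  },
		};

		for (int i = 0; i < 3; i++)
			printf("%d. %-5s %-5s -> %s\n", i + 1,
			       rows[i].use_dma_api ? "true" : "false",
			       rows[i].premapped ? "true" : "false",
			       vring_need_unmap_buffer(&rows[i]) ?
			       "true" : "false");
		return 0;
	}

Row 2 is the only case where the ring code mapped the buffers itself
and so must unmap them; once premapped is set (row 3), unmapping is the
driver's job. Computing this on demand is what lets the patch drop the
manual bookkeeping, such as the "vq->do_unmap = false;" line previously
needed in virtqueue_set_dma_premapped().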