vring_reuse_bufs_packed() scans vring.desc forward based on last_used_idx. The resulting buffers are in the order in which they were committed. It is beneficial to preserve the order of the buffers during reuse. For example, under virtio-net, if the order is not guaranteed, it may lead to out-of-order TCP streams. Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> --- drivers/virtio/virtio_ring.c | 45 ++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 8ca9985ffb4b..66f71e22ece0 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2041,6 +2041,51 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) return true; } +static void vring_reuse_bufs_packed(struct vring_virtqueue *vq, + struct vring_virtqueue_packed *vring, + void (*recycle)(struct virtqueue *vq, void *buf)) +{ + struct vring_desc_state_packed *state; + u32 last_used, id, desc_num = 0; + int err = 0; + void *buf; + + last_used = vring->last_used_idx; + + do { + id = le16_to_cpu(vring->vring.desc[last_used].id); + + state = &vring->desc_state[id]; + + if (!state->data) { + last_used++; + goto next; + } + + /* once adding to the vq fails, do not try to add to the vq again. 
*/ + if (err >= 0) { + err = vring_copy_to_vq_packed(vq, vring, id); + if (err >= 0) + goto ok; + } + + buf = state->data; + detach_buf_from_vring_packed(vring, vq, id, 0, NULL); + recycle(&vq->vq, buf); + +ok: + last_used += state->num; + desc_num += state->num; + +next: + if (unlikely(last_used >= vring->vring.num)) + last_used -= vring->vring.num; + + } while (last_used != vring->last_used_idx); + + WARN_ON(vring->num_left != desc_num); +} + static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); -- 2.31.0 _______________________________________________ Virtualization mailing list Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx https://lists.linuxfoundation.org/mailman/listinfo/virtualization