On Mon, Oct 16, 2023 at 8:01 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
>
> If the xsk is enabling, the xsk tx will share the send queue.
> But the xsk requires that the send queue use the premapped mode.
> So the send queue must support premapped mode.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> ---
>  drivers/net/virtio/main.c       | 108 ++++++++++++++++++++++++++++----
>  drivers/net/virtio/virtio_net.h |  54 +++++++++++++++-
>  2 files changed, 149 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> index 8da84ea9bcbe..02d27101fef1 100644
> --- a/drivers/net/virtio/main.c
> +++ b/drivers/net/virtio/main.c
> @@ -514,20 +514,104 @@ static void *virtnet_rq_alloc(struct virtnet_rq *rq, u32 size, gfp_t gfp)
>          return buf;
>  }
>
> -static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> +static int virtnet_sq_set_premapped(struct virtnet_sq *sq)
>  {
> -        int i;
> +        struct virtnet_sq_dma *d;
> +        int err, size, i;
>
> -        /* disable for big mode */
> -        if (!vi->mergeable_rx_bufs && vi->big_packets)
> -                return;

Not specific to this patch, but is there any plan to fix the big mode?

> +        size = virtqueue_get_vring_size(sq->vq);
> +
> +        size += MAX_SKB_FRAGS + 2;
> +
> +        sq->dmainfo.head = kcalloc(size, sizeof(*sq->dmainfo.head), GFP_KERNEL);
> +        if (!sq->dmainfo.head)
> +                return -ENOMEM;
> +
> +        err = virtqueue_set_dma_premapped(sq->vq);
> +        if (err) {
> +                kfree(sq->dmainfo.head);
> +                return err;
> +        }
> +
> +        sq->dmainfo.free = NULL;
> +
> +        sq->do_dma = true;
> +
> +        for (i = 0; i < size; ++i) {
> +                d = &sq->dmainfo.head[i];
> +
> +                d->next = sq->dmainfo.free;
> +                sq->dmainfo.free = d;
> +        }
> +
> +        return 0;
> +}
> +
> +static void virtnet_set_premapped(struct virtnet_info *vi)
> +{
> +        int i;
>
>          for (i = 0; i < vi->max_queue_pairs; i++) {
> -                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> +                if (!virtnet_sq_set_premapped(&vi->sq[i]))
> +                        vi->sq[i].do_dma = true;
> +
> +                /* disable for big mode */
> +                if (!vi->mergeable_rx_bufs && vi->big_packets)
>                          continue;
>
> -                vi->rq[i].do_dma = true;
> +                if (!virtqueue_set_dma_premapped(vi->rq[i].vq))
> +                        vi->rq[i].do_dma = true;
> +        }
> +}
> +
> +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct virtnet_sq *sq, int nents, void *data)
> +{
> +        struct virtnet_sq_dma *d, *head;
> +        struct scatterlist *sg;
> +        int i;
> +
> +        head = NULL;
> +
> +        for_each_sg(sq->sg, sg, nents, i) {
> +                sg->dma_address = virtqueue_dma_map_single_attrs(sq->vq, sg_virt(sg),
> +                                                                 sg->length,
> +                                                                 DMA_TO_DEVICE, 0);
> +                if (virtqueue_dma_mapping_error(sq->vq, sg->dma_address))
> +                        goto err;
> +
> +                d = sq->dmainfo.free;
> +                sq->dmainfo.free = d->next;
> +
> +                d->addr = sg->dma_address;
> +                d->len = sg->length;
> +
> +                d->next = head;
> +                head = d;

It's really a pity that we need to duplicate this DMA metadata twice.
Could we invent a new API to just fetch it from the virtio core? (A
rough sketch of what I mean is at the end of this mail.)

> +        }
> +
> +        head->data = data;
> +
> +        return (void *)((unsigned long)head | ((unsigned long)data & VIRTIO_XMIT_DATA_MASK));

If we packed everything into dmainfo, we could keep the type (XDP vs
skb) there and avoid tricks like packing it into the pointer here?
(See the second sketch below.)

Thanks
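
First sketch, for the "fetch it from the virtio core" question. This is
rough and untested; struct virtqueue_dma_info and virtqueue_get_dma_info()
are names invented here for illustration, they are not existing virtio core
API. The idea is only that the core already writes the DMA address/length
into the ring descriptors, so a premapped driver would not have to mirror
them in its own dmainfo:

/* Hypothetical virtio core helper -- names made up for this sketch only. */
struct virtqueue_dma_info {
        dma_addr_t addr;
        u32 len;
};

/*
 * Would return the DMA address/length recorded in the i-th out descriptor
 * of the buffer identified by @data, so that a premapped driver could
 * unmap on completion without keeping its own copy of the metadata.
 */
int virtqueue_get_dma_info(struct virtqueue *vq, void *data, int i,
                           struct virtqueue_dma_info *info);

On the completion path the driver would then iterate these entries and call
the corresponding virtqueue DMA unmap helper on each one, instead of walking
a private free list.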
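
Second sketch, for keeping the type in dmainfo instead of stealing the low
bits of the data pointer. Again rough and untested; the enum and the extra
"type" field are invented for illustration, the mapping loop just mirrors
the patch, and the error path is elided:

/* Illustration only: buffer type lives in the per-buffer DMA metadata. */
enum virtnet_xmit_type {
        VIRTNET_XMIT_TYPE_SKB,
        VIRTNET_XMIT_TYPE_XDP,
};

struct virtnet_sq_dma {
        struct virtnet_sq_dma *next;
        dma_addr_t addr;
        u32 len;
        void *data;                     /* skb or xdp_frame */
        enum virtnet_xmit_type type;    /* no need to encode this in @data */
};

static struct virtnet_sq_dma *virtnet_sq_map_sg(struct virtnet_sq *sq,
                                                int nents, void *data,
                                                enum virtnet_xmit_type type)
{
        struct virtnet_sq_dma *d, *head = NULL;
        struct scatterlist *sg;
        int i;

        for_each_sg(sq->sg, sg, nents, i) {
                sg->dma_address = virtqueue_dma_map_single_attrs(sq->vq,
                                                                 sg_virt(sg),
                                                                 sg->length,
                                                                 DMA_TO_DEVICE, 0);
                if (virtqueue_dma_mapping_error(sq->vq, sg->dma_address))
                        goto err;

                d = sq->dmainfo.free;
                sq->dmainfo.free = d->next;

                d->addr = sg->dma_address;
                d->len = sg->length;

                d->next = head;
                head = d;
        }

        /* Type is kept here, so the returned cookie stays a plain pointer. */
        head->data = data;
        head->type = type;

        return head;

err:
        /* unmap and put back whatever was mapped so far, as in the patch */
        return NULL;
}

The xmit and completion code would then pass "head" around directly and
switch on head->type instead of masking pointer bits.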