Implement the logic of filling rq with XSK buffers.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
 drivers/net/virtio_net.c | 64 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 62 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d6be569bd849..4e5645d8bb7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -388,6 +388,8 @@ struct receive_queue {
 
 		/* xdp rxq used by xsk */
 		struct xdp_rxq_info xdp_rxq;
+
+		struct xdp_buff **xsk_buffs;
 	} xsk;
 };
 
@@ -532,6 +534,8 @@ struct virtio_net_common_hdr {
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_xsk_completed(struct send_queue *sq, int num);
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+				   struct xsk_buff_pool *pool, gfp_t gfp);
 
 enum virtnet_xmit_type {
 	VIRTNET_XMIT_TYPE_SKB,
@@ -1304,6 +1308,47 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
 	sg->length = len;
 }
 
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+				   struct xsk_buff_pool *pool, gfp_t gfp)
+{
+	struct xdp_buff **xsk_buffs;
+	dma_addr_t addr;
+	u32 len, i;
+	int err = 0;
+	int num;
+
+	xsk_buffs = rq->xsk.xsk_buffs;
+
+	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
+	if (!num)
+		return -ENOMEM;
+
+	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
+
+	for (i = 0; i < num; ++i) {
+		/* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
+		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
+
+		sg_init_table(rq->sg, 1);
+		sg_fill_dma(rq->sg, addr, len);
+
+		err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+		if (err)
+			goto err;
+	}
+
+	return num;
+
+err:
+	if (i)
+		err = i;
+
+	for (; i < num; ++i)
+		xsk_buff_free(xsk_buffs[i]);
+
+	return err;
+}
+
 static int virtnet_xsk_xmit_one(struct send_queue *sq,
 				struct xsk_buff_pool *pool,
 				struct xdp_desc *desc)
@@ -2560,6 +2605,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 	int err;
 	bool oom;
 
+	if (rq->xsk.pool) {
+		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk.pool, gfp);
+		goto kick;
+	}
+
 	do {
 		if (vi->mergeable_rx_bufs)
 			err = add_recvbuf_mergeable(vi, rq, gfp);
@@ -2568,10 +2618,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		else
 			err = add_recvbuf_small(vi, rq, gfp);
 
-		oom = err == -ENOMEM;
 		if (err)
 			break;
 	} while (rq->vq->num_free);
+
+kick:
 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
 		unsigned long flags;
 
@@ -2580,6 +2631,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
 	}
 
+	oom = err == -ENOMEM;
 	return !oom;
 }
 
@@ -5449,7 +5501,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct send_queue *sq;
 	struct device *dma_dev;
 	dma_addr_t hdr_dma;
-	int err;
+	int err, size;
 
 	/* In big_packets mode, xdp cannot work, so there is no need to
 	 * initialize xsk of rq.
@@ -5484,6 +5536,12 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!dma_dev)
 		return -EPERM;
 
+	size = virtqueue_get_vring_size(rq->vq);
+
+	rq->xsk.xsk_buffs = kvcalloc(size, sizeof(*rq->xsk.xsk_buffs), GFP_KERNEL);
+	if (!rq->xsk.xsk_buffs)
+		return -ENOMEM;
+
 	hdr_dma = dma_map_single(dma_dev, &xsk_hdr, vi->hdr_len, DMA_TO_DEVICE);
 	if (dma_mapping_error(dma_dev, hdr_dma))
 		return -ENOMEM;
@@ -5542,6 +5600,8 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	dma_unmap_single(dma_dev, sq->xsk.hdr_dma_address, vi->hdr_len,
 			 DMA_TO_DEVICE);
 
+	kvfree(rq->xsk.xsk_buffs);
+
 	return err1 | err2;
 }
 
-- 
2.32.0.3.g01195cf9f
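
A note on the return convention of virtnet_add_recvbuf_xsk() above, since
it is easy to misread: the function returns the number of buffers it
managed to post when at least one virtqueue_add_inbuf() call succeeds, and
a negative errno only when nothing was posted; buffers that were never
posted go back to the pool via xsk_buff_free(). try_fill_recv() then
treats only a bare -ENOMEM as oom, so a partially refilled ring still
counts as success and the kick is issued either way. The following is a
minimal userspace sketch of that partial-success pattern; post_buf() and
fill_batch() are invented stand-ins for virtqueue_add_inbuf() and the fill
loop, not kernel APIs:

/* Model of the partial-success contract: post a batch of buffers one by
 * one; on a post failure, return how many were already posted (or the
 * error when none were) and free only the unposted remainder.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int post_buf(void *buf, int idx, int fail_at)
{
	return idx == fail_at ? -ENOMEM : 0;	/* simulated ring add */
}

static int fill_batch(void **bufs, int num, int fail_at)
{
	int err = 0;
	int i;

	for (i = 0; i < num; ++i) {
		err = post_buf(bufs[i], i, fail_at);
		if (err)
			goto err;
	}

	return num;

err:
	if (i)
		err = i;	/* partial success: report the posted count */

	for (; i < num; ++i)
		free(bufs[i]);	/* free only the unposted buffers */

	return err;
}

int main(void)
{
	void *bufs[4];
	int i, n;

	for (i = 0; i < 4; ++i)
		bufs[i] = malloc(16);

	n = fill_batch(bufs, 4, 2);	/* fails at index 2, returns 2 */
	printf("posted %d of 4\n", n);

	for (i = 0; i < n; ++i)
		free(bufs[i]);		/* posted buffers stay with the caller */

	return 0;
}

With fail_at set to 0 the same path returns -ENOMEM instead, which is the
one case try_fill_recv() reports as out of memory.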