This patch implements the logic to bind/unbind an xsk pool to/from the sq
and rq.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
 drivers/net/virtio_net.c | 53 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 41a5ea9b788d..7c379614fd22 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -295,6 +295,10 @@ struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -497,6 +501,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 			       struct net_device *dev,
@@ -5488,6 +5494,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
 				   struct xsk_buff_pool *pool,
 				   u16 qid)
@@ -5496,6 +5525,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5533,6 +5563,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5541,11 +5576,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* Now, we do not support tx offload, so the tx virtnet hdr is always
+	 * zero. Thus all the tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(sq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 
@@ -5554,19 +5602,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 
 	kvfree(rq->xsk_buffs);
 
 	return err;
-- 
2.32.0.3.g01195cf9f
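
For context, a minimal sketch (not part of this patch) of how a follow-up tx
patch could consume the shared header mapped above. The helpers sg_fill_dma()
and virtnet_xsk_to_ptr() are assumptions: sg_fill_dma() fills a scatterlist
entry with an already-mapped DMA address (the sq is assumed to run in
premapped DMA mode), and virtnet_xsk_to_ptr() is assumed to encode the
descriptor length as the completion token for the tx cleanup path.

static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
{
	/* In premapped mode, virtio consumes only the DMA address and
	 * length of each sg entry; no page mapping is performed.
	 */
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = len;
}

static int virtnet_xsk_xmit_one(struct send_queue *sq,
				struct xsk_buff_pool *pool,
				struct xdp_desc *desc)
{
	struct virtnet_info *vi = sq->vq->vdev->priv;
	dma_addr_t addr;

	addr = xsk_buff_raw_get_dma(pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);

	sg_init_table(sq->sg, 2);
	/* Every packet shares the single zeroed hdr mapped at enable
	 * time (sq->xsk_hdr_dma_addr), since no tx offload is used.
	 */
	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
	sg_fill_dma(sq->sg + 1, addr, desc->len);

	return virtqueue_add_outbuf(sq->vq, sq->sg, 2,
				    virtnet_xsk_to_ptr(desc->len),
				    GFP_ATOMIC);
}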