poll tx calls virtnet_xsk_run, then the data in the xsk tx queue will be
continuously consumed by napi.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
Reviewed-by: Dust Li <dust.li@xxxxxxxxxxxxxxxxx>
---
 drivers/net/virtio_net.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7e95f55478d..fac7d0020013 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -264,6 +264,9 @@ struct padded_vnet_hdr {
 	char padding[4];
 };
 
+static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
+			   int budget, bool in_napi);
+
 static bool is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1553,7 +1556,9 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
+	struct xsk_buff_pool *pool;
 	struct netdev_queue *txq;
+	int work = 0;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1563,15 +1568,24 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq, true);
+	rcu_read_lock();
+	pool = rcu_dereference(sq->xsk.pool);
+	if (pool) {
+		work = virtnet_xsk_run(sq, pool, budget, true);
+		rcu_read_unlock();
+	} else {
+		rcu_read_unlock();
+		free_old_xmit_skbs(sq, true);
+	}
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (work < budget)
+		virtqueue_napi_complete(napi, sq->vq, 0);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
 
-	return 0;
+	return work;
 }
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
-- 
2.31.0
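
Note (not part of the patch above): virtnet_xsk_run() is only forward-declared
in this hunk; its actual implementation lands in another patch of the series.
Purely as an illustration of the napi-budget-driven consume loop the commit
message describes, a minimal sketch could look like the code below. The
virtnet_xsk_xmit_one() helper is a made-up placeholder, not something this
series defines.

/*
 * Hypothetical sketch only, not taken from this series: drains the xsk tx
 * ring under the napi budget. virtnet_xsk_xmit_one() stands in for the code
 * that maps desc.addr/desc.len from the umem and adds the buffer to the tx
 * virtqueue.
 */
static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
			   int budget, bool in_napi)
{
	struct xdp_desc desc;
	int sent = 0;

	/* Reclaim completed tx buffers to free descriptors in the vq. */
	free_old_xmit_skbs(sq, in_napi);

	while (sent < budget && sq->vq->num_free >= 2 + MAX_SKB_FRAGS &&
	       xsk_tx_peek_desc(pool, &desc)) {
		if (virtnet_xsk_xmit_one(sq, pool, &desc))
			break;
		sent++;
	}

	/* Tell the xsk framework how far the tx ring has been consumed. */
	xsk_tx_release(pool);

	if (sent && virtqueue_kick_prepare(sq->vq))
		virtqueue_notify(sq->vq);

	return sent;
}

Returning a value below the budget is what allows virtnet_poll_tx() to call
virtqueue_napi_complete(); returning the full budget keeps the tx napi poll
scheduled, so the data in the xsk tx queue keeps being consumed.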