[PATCH 31/33] virtio_net: xsk: tx: auto wakeup when free old xmit

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



If the XSK xmit stops because the TX queue is full, it waits for the TX
interrupt to trigger the follow-up work again.

But for Virtio Net, recycling old buffers is not only done in tx
napi, but is also called from start_xmit(), rx poll and other places.

So if xsk xmit stops due to a full tx queue, __free_old_xmit() will try
to wake up tx napi.

Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
---
 drivers/net/virtio/virtio_net.h |  5 +++--
 drivers/net/virtio/xsk.c        | 30 ++++++++++++++++++++++++++++++
 drivers/net/virtio/xsk.h        |  1 +
 3 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
index fc7c7a0f3c89..100ce48c6d55 100644
--- a/drivers/net/virtio/virtio_net.h
+++ b/drivers/net/virtio/virtio_net.h
@@ -176,6 +176,8 @@ struct send_queue {
 		dma_addr_t hdr_dma_address;
 
 		u32 last_cpu;
+
+		bool need_wakeup;
 	} xsk;
 };
 
@@ -296,8 +298,7 @@ static void __free_old_xmit(struct send_queue *sq, bool in_napi,
 		stats->packets++;
 	}
 
-	if (xsknum)
-		xsk_tx_completed(sq->xsk.pool, xsknum);
+	virtnet_xsk_complete(sq, xsknum, in_napi);
 }
 
 int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
index 27b7f0bb2d34..043b0bf2a5d7 100644
--- a/drivers/net/virtio/xsk.c
+++ b/drivers/net/virtio/xsk.c
@@ -116,6 +116,7 @@ bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 	bool busy;
 	int ret;
 
+	sq->xsk.need_wakeup = false;
 	__free_old_xmit(sq, true, &stats);
 
 	if (xsk_uses_need_wakeup(pool))
@@ -138,6 +139,13 @@ bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 		 * triggered by interrupt.
 		 */
 		busy = false;
+
+		/* tx poll may not be triggered by a tx interrupt, because
+		 * start_xmit() and rx poll also try to free old xmit buffers,
+		 * so no tx interrupt may be generated. So set need_wakeup;
+		 * then tx poll can be triggered by free_old_xmit.
+		 */
+		sq->xsk.need_wakeup = true;
 		break;
 	}
 
@@ -206,6 +214,26 @@ int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
 	return 0;
 }
 
+void virtnet_xsk_complete(struct send_queue *sq, u32 num, bool in_napi)
+{
+	struct xsk_buff_pool *pool;
+
+	rcu_read_lock();
+
+	pool = rcu_dereference(sq->xsk.pool);
+	if (pool) {
+		if (num)
+			xsk_tx_completed(pool, num);
+
+		if (sq->xsk.need_wakeup) {
+			sq->xsk.need_wakeup = false;
+			virtnet_xsk_wakeup_sq(sq, in_napi);
+		}
+	}
+
+	rcu_read_unlock();
+}
+
 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
 				    struct xsk_buff_pool *pool, struct net_device *dev)
 {
@@ -298,6 +326,8 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rxq;
 
+	sq->xsk.need_wakeup = false;
+
 	/* Here is already protected by rtnl_lock, so rcu_assign_pointer
 	 * is safe.
 	 */
diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
index 5eece0de3310..f90c28972d72 100644
--- a/drivers/net/virtio/xsk.h
+++ b/drivers/net/virtio/xsk.h
@@ -19,6 +19,7 @@ static inline u32 ptr_to_xsk(void *ptr)
 	return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
 }
 
+void virtnet_xsk_complete(struct send_queue *sq, u32 num, bool in_napi);
 int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
 bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 		      int budget);
-- 
2.32.0.3.g01195cf9f




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux