A networking benchmark shows that __rcu_read_lock() and __rcu_read_unlock() take some CPU cycles. Most of those calls can be avoided in the virtio-net rx path by checking vi->xdp_enabled first, since XDP is disabled most of the time.

Signed-off-by: Li RongQing <lirongqing@xxxxxxxxx>
---
 drivers/net/virtio_net.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2ed49884565f..74d2be438180 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -734,6 +734,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		dev->stats.rx_length_errors++;
 		goto err_len;
 	}
+
+	if (likely(!vi->xdp_enabled)) {
+		xdp_prog = NULL;
+		goto skip_xdp;
+	}
+
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
@@ -816,6 +822,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	}
 	rcu_read_unlock();
 
+skip_xdp:
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		put_page(page);
@@ -897,6 +904,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		dev->stats.rx_length_errors++;
 		goto err_skb;
 	}
+
+	if (likely(!vi->xdp_enabled)) {
+		xdp_prog = NULL;
+		goto skip_xdp;
+	}
+
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
@@ -1024,6 +1037,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	}
 	rcu_read_unlock();
 
+skip_xdp:
 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize, headroom);
 	curr_skb = head_skb;
-- 
2.33.0.69.gc420321.dirty
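
Not part of the patch, but for readers unfamiliar with the idiom, here is the pattern above in isolation: a plain flag read gates the RCU read-side critical section, so the common case pays one well-predicted branch instead of two function calls. This is a minimal sketch with made-up names (demo_*); it mirrors the vi->xdp_enabled / rq->xdp_prog arrangement from the patch but is not code from the driver.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/rcupdate.h>

/* Hypothetical stand-ins for virtnet_info / receive_queue. */
struct demo_prog {
	int (*run)(void *buf);
};

struct demo_device {
	bool prog_enabled;		/* like vi->xdp_enabled, toggled on attach/detach */
	struct demo_prog __rcu *prog;	/* like rq->xdp_prog, RCU-protected pointer */
};

static int demo_rx(struct demo_device *d, void *buf)
{
	struct demo_prog *prog;
	int verdict = 0;

	/*
	 * Fast path: when no program can be attached, skip the RCU
	 * read-side critical section entirely.
	 */
	if (likely(!d->prog_enabled))
		return 0;

	/* Slow path: dereference the pointer under RCU as before. */
	rcu_read_lock();
	prog = rcu_dereference(d->prog);
	if (prog)
		verdict = prog->run(buf);
	rcu_read_unlock();

	return verdict;
}

Two caveats on the sketch: __rcu_read_lock()/__rcu_read_unlock() are out-of-line calls only on PREEMPT_RCU kernels, while on non-preemptible kernels rcu_read_lock() compiles to almost nothing, so the win is config-dependent. And the unlocked flag read can race with program attach, meaning a packet in flight may skip a just-attached program once; an approach like this has to tolerate that, since the slow path still re-checks the pointer under RCU.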