No functional changes. Boilerplate to allow stuffing more data after xdp_buff.

Cc: John Fastabend <john.fastabend@xxxxxxxxx>
Cc: David Ahern <dsahern@xxxxxxxxx>
Cc: Martin KaFai Lau <martin.lau@xxxxxxxxx>
Cc: Jakub Kicinski <kuba@xxxxxxxxxx>
Cc: Willem de Bruijn <willemb@xxxxxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Cc: Anatoly Burakov <anatoly.burakov@xxxxxxxxx>
Cc: Alexander Lobakin <alexandr.lobakin@xxxxxxxxx>
Cc: Magnus Karlsson <magnus.karlsson@xxxxxxxxx>
Cc: Maryam Tahhan <mtahhan@xxxxxxxxxx>
Cc: xdp-hints@xxxxxxxxxxxxxxx
Cc: netdev@xxxxxxxxxxxxxxx
Signed-off-by: Stanislav Fomichev <sdf@xxxxxxxxxx>
---
 drivers/net/ethernet/intel/ice/ice_txrx.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dbe80e5053a8..1b6afa168501 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1096,6 +1096,10 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
 	return true;
 }
 
+struct ice_xdp_buff {
+	struct xdp_buff xdp;
+};
+
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: Rx descriptor ring to transact packets on
@@ -1117,14 +1121,14 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	unsigned int xdp_res, xdp_xmit = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	struct bpf_prog *xdp_prog = NULL;
-	struct xdp_buff xdp;
+	struct ice_xdp_buff ixbuf;
 	bool failure;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
 	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
 #endif
-	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+	xdp_init_buff(&ixbuf.xdp, frame_sz, &rx_ring->xdp_rxq);
 
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 	if (xdp_prog)
@@ -1178,30 +1182,30 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
 		if (!size) {
-			xdp.data = NULL;
-			xdp.data_end = NULL;
-			xdp.data_hard_start = NULL;
-			xdp.data_meta = NULL;
+			ixbuf.xdp.data = NULL;
+			ixbuf.xdp.data_end = NULL;
+			ixbuf.xdp.data_hard_start = NULL;
+			ixbuf.xdp.data_meta = NULL;
 			goto construct_skb;
 		}
 
 		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
 			     offset;
-		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+		xdp_prepare_buff(&ixbuf.xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 		/* At larger PAGE_SIZE, frame_sz depend on len size */
-		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+		ixbuf.xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
 #endif
 
 		if (!xdp_prog)
 			goto construct_skb;
 
-		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
+		xdp_res = ice_run_xdp(rx_ring, &ixbuf.xdp, xdp_prog, xdp_ring);
 		if (!xdp_res)
 			goto construct_skb;
 		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
 			xdp_xmit |= xdp_res;
-			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
+			ice_rx_buf_adjust_pg_offset(rx_buf, ixbuf.xdp.frame_sz);
 		} else {
 			rx_buf->pagecnt_bias++;
 		}
@@ -1214,11 +1218,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 construct_skb:
 		if (skb) {
 			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
-		} else if (likely(xdp.data)) {
+		} else if (likely(ixbuf.xdp.data)) {
 			if (ice_ring_uses_build_skb(rx_ring))
-				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+				skb = ice_build_skb(rx_ring, rx_buf, &ixbuf.xdp);
 			else
-				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+				skb = ice_construct_skb(rx_ring, rx_buf, &ixbuf.xdp);
 		}
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {

-- 
2.38.1.431.g37b22c650d-goog
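
For readers unfamiliar with the pattern, here is a minimal userspace sketch (not part of this patch) of why the wrapper helps: once driver-private fields are placed after the embedded xdp_buff, the driver can recover its ice_xdp_buff from the plain xdp_buff pointer that generic XDP code passes around, via container_of(). The stripped-down xdp_buff stand-in and the example_hint field below are assumptions for illustration only.

/* Illustrative sketch only, not part of the patch. struct xdp_buff here is
 * a stripped-down stand-in for the kernel's, and example_hint is a
 * hypothetical field that a later change might stuff after the xdp_buff.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xdp_buff {			/* stand-in for the kernel struct */
	void *data;
	void *data_end;
};

struct ice_xdp_buff {
	struct xdp_buff xdp;		/* embedded, as in the patch */
	unsigned int example_hint;	/* hypothetical extra per-packet data */
};

int main(void)
{
	struct ice_xdp_buff ixbuf = { .example_hint = 42 };
	struct xdp_buff *xdp = &ixbuf.xdp;	/* what generic XDP code sees */

	/* Recover the driver-private wrapper from the xdp_buff pointer */
	struct ice_xdp_buff *priv = container_of(xdp, struct ice_xdp_buff, xdp);

	printf("example_hint = %u\n", priv->example_hint);
	return 0;
}

Keeping xdp as the embedded (first) member means no extra pointer dereference or allocation is needed on the hot Rx path; the wrapper lives on the stack exactly where the bare xdp_buff used to.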