On Tue, Nov 22, 2022 at 5:49 AM Tariq Toukan <ttoukan.linux@xxxxxxxxx> wrote:
>
>
>
> On 11/21/2022 8:25 PM, Stanislav Fomichev wrote:
> > No functional changes. Boilerplate to allow stuffing more data after xdp_buff.
> >
> > Cc: Tariq Toukan <tariqt@xxxxxxxxxx>
> > Cc: John Fastabend <john.fastabend@xxxxxxxxx>
> > Cc: David Ahern <dsahern@xxxxxxxxx>
> > Cc: Martin KaFai Lau <martin.lau@xxxxxxxxx>
> > Cc: Jakub Kicinski <kuba@xxxxxxxxxx>
> > Cc: Willem de Bruijn <willemb@xxxxxxxxxx>
> > Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
> > Cc: Anatoly Burakov <anatoly.burakov@xxxxxxxxx>
> > Cc: Alexander Lobakin <alexandr.lobakin@xxxxxxxxx>
> > Cc: Magnus Karlsson <magnus.karlsson@xxxxxxxxx>
> > Cc: Maryam Tahhan <mtahhan@xxxxxxxxxx>
> > Cc: xdp-hints@xxxxxxxxxxxxxxx
> > Cc: netdev@xxxxxxxxxxxxxxx
> > Signed-off-by: Stanislav Fomichev <sdf@xxxxxxxxxx>
> > ---
> >  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 26 +++++++++++++---------
> >  1 file changed, 15 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> > index 8f762fc170b3..467356633172 100644
> > --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> > +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> > @@ -661,17 +661,21 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
> >  #define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
> >  #endif
> >
> > +struct mlx4_xdp_buff {
> > +	struct xdp_buff xdp;
> > +};
> > +
> >  int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
> >  {
> >  	struct mlx4_en_priv *priv = netdev_priv(dev);
> >  	int factor = priv->cqe_factor;
> >  	struct mlx4_en_rx_ring *ring;
> > +	struct mlx4_xdp_buff mxbuf;
>
> As it doesn't go through an init function (only mxbuf.xdp does), better
> init to zero.

SG, will do, thanks!
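For the respin, probably something along these lines (a minimal sketch of
the zero-init; the exact form may end up different):

	/* Zero the whole wrapper up front: only mxbuf.xdp is set up via
	 * xdp_init_buff(), so any fields added to struct mlx4_xdp_buff
	 * later would otherwise start out uninitialized.
	 */
	struct mlx4_xdp_buff mxbuf = {};

	/* unchanged from the patch */
	xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);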
> >  	struct bpf_prog *xdp_prog;
> >  	int cq_ring = cq->ring;
> >  	bool doorbell_pending;
> >  	bool xdp_redir_flush;
> >  	struct mlx4_cqe *cqe;
> > -	struct xdp_buff xdp;
> >  	int polled = 0;
> >  	int index;
> >
> > @@ -681,7 +685,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
> >  	ring = priv->rx_ring[cq_ring];
> >
> >  	xdp_prog = rcu_dereference_bh(ring->xdp_prog);
> > -	xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
> > +	xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
> >  	doorbell_pending = false;
> >  	xdp_redir_flush = false;
> >
> > @@ -776,24 +780,24 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
> >  						priv->frag_info[0].frag_size,
> >  						DMA_FROM_DEVICE);
> >
> > -			xdp_prepare_buff(&xdp, va - frags[0].page_offset,
> > +			xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
> >  					 frags[0].page_offset, length, false);
> > -			orig_data = xdp.data;
> > +			orig_data = mxbuf.xdp.data;
> >
> > -			act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > +			act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
> >
> > -			length = xdp.data_end - xdp.data;
> > -			if (xdp.data != orig_data) {
> > -				frags[0].page_offset = xdp.data -
> > -					xdp.data_hard_start;
> > -				va = xdp.data;
> > +			length = mxbuf.xdp.data_end - mxbuf.xdp.data;
> > +			if (mxbuf.xdp.data != orig_data) {
> > +				frags[0].page_offset = mxbuf.xdp.data -
> > +					mxbuf.xdp.data_hard_start;
> > +				va = mxbuf.xdp.data;
> >  			}
> >
> >  			switch (act) {
> >  			case XDP_PASS:
> >  				break;
> >  			case XDP_REDIRECT:
> > -				if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
> > +				if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
> >  					ring->xdp_redirect++;
> >  					xdp_redir_flush = true;
> >  					frags[0].page = NULL;
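For context on the "stuffing more data after xdp_buff" part of the commit
message: the wrapper is empty in this patch, and my reading is that
follow-up changes can embed driver state next to the xdp_buff and recover
it with container_of(). The field and helper names below are purely
illustrative, not part of this patch:

struct mlx4_xdp_buff {
	struct xdp_buff xdp;	/* embedded buffer handed to bpf_prog_run_xdp() */
	struct mlx4_cqe *cqe;	/* hypothetical: per-packet completion entry */
};

/* Hypothetical helper: recover the driver wrapper from the xdp_buff
 * passed back to the driver by the XDP core / metadata hooks.
 */
static inline struct mlx4_xdp_buff *to_mlx4_xdp_buff(struct xdp_buff *xdp)
{
	return container_of(xdp, struct mlx4_xdp_buff, xdp);
}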