On Tue, 2024-04-02 at 12:33 +0200, Julien Panis wrote:

[...]

> +static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, struct am65_cpsw_port *port,
> +			     struct xdp_buff *xdp, int desc_idx, int cpu, int *len)
> +{
> +	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
> +	struct net_device *ndev = port->ndev;
> +	int ret = AM65_CPSW_XDP_CONSUMED;
> +	struct am65_cpsw_tx_chn *tx_chn;
> +	struct netdev_queue *netif_txq;
> +	struct xdp_frame *xdpf;
> +	struct bpf_prog *prog;
> +	struct page *page;
> +	u32 act;
> +
> +	prog = READ_ONCE(port->xdp_prog);
> +	if (!prog)
> +		return AM65_CPSW_XDP_PASS;
> +
> +	act = bpf_prog_run_xdp(prog, xdp);
> +	/* XDP prog might have changed packet data and boundaries */
> +	*len = xdp->data_end - xdp->data;
> +
> +	switch (act) {
> +	case XDP_PASS:
> +		ret = AM65_CPSW_XDP_PASS;
> +		goto out;
> +	case XDP_TX:
> +		tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
> +		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
> +
> +		xdpf = xdp_convert_buff_to_frame(xdp);
> +		if (unlikely(!xdpf))
> +			break;
> +
> +		__netif_tx_lock(netif_txq, cpu);
> +		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
> +					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
> +		__netif_tx_unlock(netif_txq);
> +		if (ret)
> +			break;
> +
> +		ndev->stats.rx_bytes += *len;
> +		ndev->stats.rx_packets++;
> +		ret = AM65_CPSW_XDP_CONSUMED;
> +		goto out;
> +	case XDP_REDIRECT:
> +		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
> +			break;
> +
> +		xdp_do_flush();

The above will kill XDP redirect performance. Even if this HW has the
same limitation as cpsw, it would still deserve an explicit comment.

Quickly skimming over the code, it does not look like that limitation
applies here, so you could possibly move xdp_do_flush() into
am65_cpsw_nuss_rx_poll().

Cheers,

Paolo
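
For illustration, deferring the flush as suggested might look roughly like
the sketch below. This is not the driver's actual code: the
am65_cpsw_nuss_rx_packets() call uses a simplified, assumed signature, the
xdp_redirected flag plumbing and the container_of() layout are hypothetical,
and only xdp_do_redirect(), xdp_do_flush() and napi_complete_done() are the
real kernel helpers.

	/*
	 * Illustrative sketch only -- not the actual driver code. It shows
	 * the general pattern of batching the redirect flush: the per-packet
	 * path records that at least one frame was redirected, and the NAPI
	 * poll function calls xdp_do_flush() once at the end of the loop.
	 */
	static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
	{
		/* assumed struct layout: common embeds the RX napi_struct */
		struct am65_cpsw_common *common =
			container_of(napi_rx, struct am65_cpsw_common, napi_rx);
		bool xdp_redirected = false;
		int num_rx = 0;

		while (num_rx < budget) {
			/* Per-packet processing (simplified, assumed signature).
			 * On XDP_REDIRECT the helper calls xdp_do_redirect() and
			 * sets *xdp_redirected instead of flushing immediately.
			 */
			if (am65_cpsw_nuss_rx_packets(common, &xdp_redirected))
				break;
			num_rx++;
		}

		/* One flush per NAPI poll instead of one per redirected packet. */
		if (xdp_redirected)
			xdp_do_flush();

		if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
			/* the driver would re-enable its RX completion IRQ here */
		}

		return num_rx;
	}

With that structure, the per-packet XDP_REDIRECT case would only call
xdp_do_redirect() and record that a flush is pending, so all frames
redirected during one poll are flushed together rather than one at a time.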