On Wed, Mar 05, 2025 at 05:21:27PM +0100, Alexander Lobakin wrote:
> From: Michal Kubiak <michal.kubiak@xxxxxxxxx>
>
> Extend basic structures of the driver (e.g. 'idpf_vport', 'idpf_*_queue',
> 'idpf_vport_user_config_data') by adding members necessary to support XDP.
> Add extra XDP Tx queues needed to support XDP_TX and XDP_REDIRECT actions
> without interfering with regular Tx traffic.
> Also add functions dedicated to support XDP initialization for Rx and
> Tx queues and call those functions from the existing algorithms of
> queues configuration.
>
> Signed-off-by: Michal Kubiak <michal.kubiak@xxxxxxxxx>
> Co-developed-by: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
> ---
>  drivers/net/ethernet/intel/idpf/Kconfig       |   2 +-
>  drivers/net/ethernet/intel/idpf/Makefile      |   2 +
>  drivers/net/ethernet/intel/idpf/idpf.h        |  20 ++
>  drivers/net/ethernet/intel/idpf/idpf_txrx.h   |  86 ++++++--
>  drivers/net/ethernet/intel/idpf/xdp.h         |  17 ++
>  .../net/ethernet/intel/idpf/idpf_ethtool.c    |   6 +-
>  drivers/net/ethernet/intel/idpf/idpf_lib.c    |  21 +-
>  drivers/net/ethernet/intel/idpf/idpf_main.c   |   1 +
>  .../ethernet/intel/idpf/idpf_singleq_txrx.c   |   8 +-
>  drivers/net/ethernet/intel/idpf/idpf_txrx.c   | 109 +++++++---
>  .../net/ethernet/intel/idpf/idpf_virtchnl.c   |  26 +--
>  drivers/net/ethernet/intel/idpf/xdp.c         | 189 ++++++++++++++++++
>  12 files changed, 415 insertions(+), 72 deletions(-)
>  create mode 100644 drivers/net/ethernet/intel/idpf/xdp.h
>  create mode 100644 drivers/net/ethernet/intel/idpf/xdp.c
>
> diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
> index 1addd663acad..7207ee4dbae8 100644
> --- a/drivers/net/ethernet/intel/idpf/Kconfig
> +++ b/drivers/net/ethernet/intel/idpf/Kconfig
> @@ -5,7 +5,7 @@ config IDPF
>          tristate "Intel(R) Infrastructure Data Path Function Support"
>          depends on PCI_MSI
>          select DIMLIB
> -        select LIBETH
> +        select LIBETH_XDP
>          help
>            This driver supports Intel(R) Infrastructure Data Path Function
>            devices.
> diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
> index 2ce01a0b5898..c58abe6f8f5d 100644
> --- a/drivers/net/ethernet/intel/idpf/Makefile
> +++ b/drivers/net/ethernet/intel/idpf/Makefile
> @@ -17,3 +17,5 @@ idpf-y := \
>          idpf_vf_dev.o
>
>  idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
> +
> +idpf-y += xdp.o
> diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
> index 50dde09c525b..4847760744ff 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf.h
> +++ b/drivers/net/ethernet/intel/idpf/idpf.h
> @@ -257,6 +257,10 @@ struct idpf_port_stats {
>   * @txq_model: Split queue or single queue queuing model
>   * @txqs: Used only in hotpath to get to the right queue very fast
>   * @crc_enable: Enable CRC insertion offload
> + * @xdpq_share: whether XDPSQ sharing is enabled
> + * @num_xdp_txq: number of XDPSQs
> + * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
> + * @xdp_prog: installed XDP program
>   * @num_rxq: Number of allocated RX queues
>   * @num_bufq: Number of allocated buffer queues
>   * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
> @@ -303,6 +307,11 @@ struct idpf_vport {
>          struct idpf_tx_queue **txqs;
>          bool crc_enable;
>
> +        bool xdpq_share;
> +        u16 num_xdp_txq;
> +        u16 xdp_txq_offset;
> +        struct bpf_prog *xdp_prog;
> +
>          u16 num_rxq;
>          u16 num_bufq;
>          u32 rxq_desc_count;
> @@ -380,6 +389,7 @@ struct idpf_rss_data {
>   *                    ethtool
>   * @num_req_rxq_desc: Number of user requested RX queue descriptors through
>   *                    ethtool
> + * @xdp_prog: requested XDP program to install
>   * @user_flags: User toggled config flags
>   * @mac_filter_list: List of MAC filters
>   *
> @@ -391,6 +401,7 @@ struct idpf_vport_user_config_data {
>          u16 num_req_rx_qs;
>          u32 num_req_txq_desc;
>          u32 num_req_rxq_desc;
> +        struct bpf_prog *xdp_prog;
>          DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
>          struct list_head mac_filter_list;
>  };
> @@ -604,6 +615,15 @@ static inline int idpf_is_queue_model_split(u16 q_model)
>                 q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
>  }
>
> +/**
> + * idpf_xdp_is_prog_ena - check if there is an XDP program on adapter
> + * @vport: vport to check
> + */
> +static inline bool idpf_xdp_is_prog_ena(const struct idpf_vport *vport)
> +{
> +        return vport->adapter && vport->xdp_prog;
> +}

(...)

> +
> +#endif /* _IDPF_XDP_H_ */
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
> index 59b1a1a09996..1ca322bfe92f 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
> @@ -186,9 +186,11 @@ static void idpf_get_channels(struct net_device *netdev,
>  {
>          struct idpf_netdev_priv *np = netdev_priv(netdev);
>          struct idpf_vport_config *vport_config;
> +        const struct idpf_vport *vport;
>          u16 num_txq, num_rxq;
>          u16 combined;
>
> +        vport = idpf_netdev_to_vport(netdev);
>          vport_config = np->adapter->vport_config[np->vport_idx];
>
>          num_txq = vport_config->user_config.num_req_tx_qs;
> @@ -202,8 +204,8 @@ static void idpf_get_channels(struct net_device *netdev,
>          ch->max_rx = vport_config->max_q.max_rxq;
>          ch->max_tx = vport_config->max_q.max_txq;
>
> -        ch->max_other = IDPF_MAX_MBXQ;
> -        ch->other_count = IDPF_MAX_MBXQ;
> +        ch->max_other = IDPF_MAX_MBXQ + vport->num_xdp_txq;
> +        ch->other_count = IDPF_MAX_MBXQ + vport->num_xdp_txq;

That's new I think. Do you explain somewhere that `other` will now carry
the XDPSQ count? Otherwise, how would I know how to interpret this value?
Also, from what I see, num_txq carries the (txq + xdpq) count. How does
that affect `combined` from ethtool_channels?
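To make that concrete: with made-up numbers (8 regular SQs, 8 RQs,
8 XDPSQs, IDPF_MAX_MBXQ == 1) and under *my* assumptions that num_txq
ends up as (txq + xdpq) and that tx_count is derived the same way as
rx_count -- neither of which this patch states -- userspace would get:

        combined = min(num_txq, num_rxq);       /* min(16, 8) == 8 */

        ch->combined_count = combined;          /* 8 */
        ch->rx_count = num_rxq - combined;      /* 0 */
        ch->tx_count = num_txq - combined;      /* 8 -- the XDPSQs? */
        ch->other_count = IDPF_MAX_MBXQ + 8;    /* 9 -- XDPSQs again */

If I got that right, the XDPSQs leak into the Tx side *and* into
`other`, and nothing tells the user how to decompose either number.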
>
>          ch->combined_count = combined;
>          ch->rx_count = num_rxq - combined;
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
> index 2594ca38e8ca..0f4edc9cd1ad 100644

(...)

> +
> +/**
> + * __idpf_xdp_rxq_info_init - Setup XDP RxQ info for a given Rx queue
> + * @rxq: Rx queue for which the resources are setup
> + * @arg: flag indicating if the HW works in split queue mode
> + *
> + * Return: 0 on success, negative on failure.
> + */
> +static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
> +{
> +        const struct idpf_vport *vport = rxq->q_vector->vport;
> +        bool split = idpf_is_queue_model_split(vport->rxq_model);
> +        const struct page_pool *pp;
> +        int err;
> +
> +        err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
> +                                 rxq->q_vector->napi.napi_id,
> +                                 rxq->rx_buf_size);
> +        if (err)
> +                return err;
> +
> +        pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
> +        xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
> +
> +        if (!split)
> +                return 0;

Why do you care about the queue model here if the next patch doesn't
allow XDP_SETUP_PROG for anything but splitq?
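If splitq really is the only model that can ever see a prog, I'd expect
both branches to just collapse. A minimal sketch of what I mean,
untested and assuming splitq-only XDP:

        static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
        {
                const struct idpf_vport *vport = rxq->q_vector->vport;
                int err;

                err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
                                         rxq->q_vector->napi.napi_id,
                                         rxq->rx_buf_size);
                if (err)
                        return err;

                /* splitq Rx buffers always come from the bufq's pool */
                xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq,
                                              rxq->bufq_sets[0].bufq.pp);

                rxq->xdpqs = &vport->txqs[vport->xdp_txq_offset];
                rxq->num_xdp_txq = vport->num_xdp_txq;

                return 0;
        }

That would also let you drop the queue-model games from the deinit path
below.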
> +
> +        rxq->xdpqs = &vport->txqs[vport->xdp_txq_offset];
> +        rxq->num_xdp_txq = vport->num_xdp_txq;
> +
> +        return 0;
> +}
> +
> +/**
> + * idpf_xdp_rxq_info_init_all - initialize RxQ info for all Rx queues in vport
> + * @vport: vport to setup the info
> + *
> + * Return: 0 on success, negative on failure.
> + */
> +int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
> +{
> +        return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
> +}
> +
> +/**
> + * __idpf_xdp_rxq_info_deinit - Deinit XDP RxQ info for a given Rx queue
> + * @rxq: Rx queue for which the resources are destroyed
> + * @arg: flag indicating if the HW works in split queue mode
> + *
> + * Return: always 0.
> + */
> +static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
> +{
> +        if (idpf_is_queue_model_split((size_t)arg)) {
> +                rxq->xdpqs = NULL;
> +                rxq->num_xdp_txq = 0;
> +        }
> +
> +        xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
> +        xdp_rxq_info_unreg(&rxq->xdp_rxq);
> +
> +        return 0;
> +}
> +
> +/**
> + * idpf_xdp_rxq_info_deinit_all - deinit RxQ info for all Rx queues in vport
> + * @vport: vport to setup the info
> + */
> +void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
> +{
> +        idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
> +                          (void *)(size_t)vport->rxq_model);
> +}
> +
> +int idpf_vport_xdpq_get(const struct idpf_vport *vport)
> +{
> +        struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;

Please bear with me here: so this array will exist for as long as there
is a single timers[i] allocated, even though it's a local variable? And
is that how you avoid having to store it in the vport? (I tried to
write down my understanding at the bottom of this mail.)

> +        struct net_device *dev;
> +        u32 sqs;
> +
> +        if (!idpf_xdp_is_prog_ena(vport))
> +                return 0;
> +
> +        timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
> +        if (!timers)
> +                return -ENOMEM;
> +
> +        for (u32 i = 0; i < vport->num_xdp_txq; i++) {
> +                timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
> +                                         cpu_to_mem(i));
> +                if (!timers[i]) {
> +                        for (int j = i - 1; j >= 0; j--)
> +                                kfree(timers[j]);
> +
> +                        return -ENOMEM;
> +                }
> +        }
> +
> +        dev = vport->netdev;
> +        sqs = vport->xdp_txq_offset;
> +
> +        for (u32 i = sqs; i < vport->num_txq; i++) {
> +                struct idpf_tx_queue *xdpq = vport->txqs[i];
> +
> +                xdpq->complq = xdpq->txq_grp->complq;
> +
> +                idpf_queue_clear(FLOW_SCH_EN, xdpq);
> +                idpf_queue_clear(FLOW_SCH_EN, xdpq->complq);
> +                idpf_queue_set(NOIRQ, xdpq);
> +                idpf_queue_set(XDP, xdpq);
> +                idpf_queue_set(XDP, xdpq->complq);
> +
> +                xdpq->timer = timers[i - sqs];
> +                libeth_xdpsq_get(&xdpq->xdp_lock, dev, vport->xdpq_share);
> +
> +                xdpq->pending = 0;
> +                xdpq->xdp_tx = 0;
> +                xdpq->thresh = libeth_xdp_queue_threshold(xdpq->desc_count);
> +        }
> +
> +        return 0;
> +}
> +
> +void idpf_vport_xdpq_put(const struct idpf_vport *vport)
> +{
> +        struct net_device *dev;
> +        u32 sqs;
> +
> +        if (!idpf_xdp_is_prog_ena(vport))
> +                return;
> +
> +        dev = vport->netdev;
> +        sqs = vport->xdp_txq_offset;
> +
> +        for (u32 i = sqs; i < vport->num_txq; i++) {
> +                struct idpf_tx_queue *xdpq = vport->txqs[i];
> +
> +                if (!idpf_queue_has_clear(XDP, xdpq))
> +                        continue;
> +
> +                libeth_xdpsq_put(&xdpq->xdp_lock, dev);
> +
> +                kfree(xdpq->timer);
> +                idpf_queue_clear(NOIRQ, xdpq);
> +        }
> +}
> --
> 2.48.1
>
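Re my __free() question above: writing it down, this is how I understand
the cleanup.h semantics -- please correct me if the toy below is wrong.
demo_timer, demo_qs and the counts are all made up, only the scoping
behaviour matters:

        #include <linux/cleanup.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        struct demo_timer { int dummy; };

        /* stands in for xdpq->timer, i.e. the long-lived owners;
         * assume n <= ARRAY_SIZE(demo_qs) below */
        static struct demo_timer *demo_qs[8];

        static int demo_alloc(u32 n)
        {
                struct demo_timer **timers __free(kvfree) = NULL;

                timers = kvcalloc(n, sizeof(*timers), GFP_KERNEL);
                if (!timers)
                        return -ENOMEM; /* kvfree(NULL) is a no-op */

                for (u32 i = 0; i < n; i++) {
                        timers[i] = kzalloc(sizeof(*timers[i]), GFP_KERNEL);
                        if (!timers[i]) {
                                while (i--)
                                        kfree(timers[i]);
                                /* the pointer array itself is kvfree()d
                                 * automatically on this return */
                                return -ENOMEM;
                        }
                }

                for (u32 i = 0; i < n; i++)
                        demo_qs[i] = timers[i]; /* ownership handed over */

                /* scope exit: only the *array* is kvfree()d here; the
                 * per-timer allocations live on via demo_qs[] */
                return 0;
        }

IOW the array doesn't outlive the function at all -- it's freed on every
return path -- and that seems to be exactly why it needs no home in the
vport. Is that the intent?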