> -----Original Message-----
> From: Frank Li <frank.li@xxxxxxx>
> Sent: October 16, 2024 0:54
> To: Wei Fang <wei.fang@xxxxxxx>
> Cc: davem@xxxxxxxxxxxxx; edumazet@xxxxxxxxxx; kuba@xxxxxxxxxx;
> pabeni@xxxxxxxxxx; robh@xxxxxxxxxx; krzk+dt@xxxxxxxxxx;
> conor+dt@xxxxxxxxxx; Vladimir Oltean <vladimir.oltean@xxxxxxx>; Claudiu
> Manoil <claudiu.manoil@xxxxxxx>; Clark Wang <xiaoning.wang@xxxxxxx>;
> christophe.leroy@xxxxxxxxxx; linux@xxxxxxxxxxxxxxx; bhelgaas@xxxxxxxxxx;
> horms@xxxxxxxxxx; imx@xxxxxxxxxxxxxxx; netdev@xxxxxxxxxxxxxxx;
> devicetree@xxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx;
> linux-pci@xxxxxxxxxxxxxxx
> Subject: Re: [PATCH v2 net-next 10/13] net: enetc: extract
> enetc_int_vector_init/destroy() from enetc_alloc_msix()
>
> On Tue, Oct 15, 2024 at 08:58:38PM +0800, Wei Fang wrote:
> > From: Clark Wang <xiaoning.wang@xxxxxxx>
> >
> > Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> > enetc_alloc_msix() so that the code is more concise and readable.
> >
> > Signed-off-by: Clark Wang <xiaoning.wang@xxxxxxx>
> > Signed-off-by: Wei Fang <wei.fang@xxxxxxx>
> > ---
> > v2 changes:
> > This patch is separated from v1 patch 9 ("net: enetc: optimize the
> > allocation of tx_bdr"). Separate enetc_int_vector_init() from the
> > original patch. In addition, add a new helper function,
> > enetc_int_vector_destroy().
> > ---
> >  drivers/net/ethernet/freescale/enetc/enetc.c | 174 +++++++++----------
> >  1 file changed, 87 insertions(+), 87 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
> > index 032d8eadd003..d36af3f8ba31 100644
> > --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> > +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> > @@ -2965,6 +2965,87 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
> >  }
> >  EXPORT_SYMBOL_GPL(enetc_ioctl);
> >
> > +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> > +				 int v_tx_rings)
> > +{
> > +	struct enetc_int_vector *v __free(kfree);
>
> The old code did not use cleanup.h. Please keep it exactly the same as the
> old code, or at least mention the change in the commit log.
>

Okay, I will mention it in the commit message.
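For reference, the cleanup pattern above works roughly as follows (a minimal
sketch with made-up names, not the actual enetc code): __free(kfree) from
<linux/cleanup.h> arranges for the pointer to be kfree()'d automatically when
it goes out of scope, so the early error returns need no explicit kfree(), and
no_free_ptr() transfers ownership out of the scope on success so the automatic
free is skipped.

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Hypothetical type and helper, for illustration only. */
struct foo {
	int val;
};

static int example_setup(struct foo *p)
{
	p->val = 1;
	return 0;
}

static int example_alloc(struct foo **out)
{
	/* Freed automatically on any return path below, unless ownership
	 * is transferred with no_free_ptr().
	 */
	struct foo *p __free(kfree) = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;	/* kfree(NULL) is a no-op */

	if (example_setup(p))
		return -EINVAL;	/* p is kfree()'d automatically here */

	*out = no_free_ptr(p);	/* disarm the auto-free; caller owns p now */
	return 0;
}
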
> > +	struct enetc_bdr *bdr;
> > +	int j, err;
> > +
> > +	v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > +	if (!v)
> > +		return -ENOMEM;
> > +
> > +	bdr = &v->rx_ring;
> > +	bdr->index = i;
> > +	bdr->ndev = priv->ndev;
> > +	bdr->dev = priv->dev;
> > +	bdr->bd_count = priv->rx_bd_count;
> > +	bdr->buffer_offset = ENETC_RXB_PAD;
> > +	priv->rx_ring[i] = bdr;
> > +
> > +	err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> > +	if (err)
> > +		return err;
> > +
> > +	err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> > +					 MEM_TYPE_PAGE_SHARED, NULL);
> > +	if (err) {
> > +		xdp_rxq_info_unreg(&bdr->xdp.rxq);
> > +		return err;
> > +	}
> > +
> > +	/* init defaults for adaptive IC */
> > +	if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> > +		v->rx_ictt = 0x1;
> > +		v->rx_dim_en = true;
> > +	}
> > +
> > +	INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> > +	netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> > +	v->count_tx_rings = v_tx_rings;
> > +
> > +	for (j = 0; j < v_tx_rings; j++) {
> > +		int idx;
> > +
> > +		/* default tx ring mapping policy */
> > +		idx = priv->bdr_int_num * j + i;
> > +		__set_bit(idx, &v->tx_rings_map);
> > +		bdr = &v->tx_ring[j];
> > +		bdr->index = idx;
> > +		bdr->ndev = priv->ndev;
> > +		bdr->dev = priv->dev;
> > +		bdr->bd_count = priv->tx_bd_count;
> > +		priv->tx_ring[idx] = bdr;
> > +	}
> > +
> > +	priv->int_vector[i] = no_free_ptr(v);
> > +
> > +	return 0;
> > +}
> > +
> > +static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
> > +{
> > +	struct enetc_int_vector *v = priv->int_vector[i];
> > +	struct enetc_bdr *rx_ring = &v->rx_ring;
> > +	int j, tx_ring_index;
> > +
> > +	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
> > +	xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
> > +	netif_napi_del(&v->napi);
> > +	cancel_work_sync(&v->rx_dim.work);
> > +
> > +	priv->rx_ring[i] = NULL;
> > +
> > +	for (j = 0; j < v->count_tx_rings; j++) {
> > +		tx_ring_index = priv->bdr_int_num * j + i;
> > +		priv->tx_ring[tx_ring_index] = NULL;
> > +	}
> > +
> > +	kfree(v);
> > +	priv->int_vector[i] = NULL;
> > +}
> > +
> >  int enetc_alloc_msix(struct enetc_ndev_priv *priv)
> >  {
> >  	struct pci_dev *pdev = priv->si->pdev;
> > @@ -2986,64 +3067,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
> >  	/* # of tx rings per int vector */
> >  	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
> >
> > -	for (i = 0; i < priv->bdr_int_num; i++) {
> > -		struct enetc_int_vector *v;
> > -		struct enetc_bdr *bdr;
> > -		int j;
> > -
> > -		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > -		if (!v) {
> > -			err = -ENOMEM;
> > +	for (i = 0; i < priv->bdr_int_num; i++)
> > +		if (enetc_int_vector_init(priv, i, v_tx_rings))
> >  			goto fail;
> > -		}
> > -
> > -		priv->int_vector[i] = v;
> > -
> > -		bdr = &v->rx_ring;
> > -		bdr->index = i;
> > -		bdr->ndev = priv->ndev;
> > -		bdr->dev = priv->dev;
> > -		bdr->bd_count = priv->rx_bd_count;
> > -		bdr->buffer_offset = ENETC_RXB_PAD;
> > -		priv->rx_ring[i] = bdr;
> > -
> > -		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
> > -		if (err) {
> > -			kfree(v);
> > -			goto fail;
> > -		}
> > -
> > -		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
> > -						 MEM_TYPE_PAGE_SHARED, NULL);
> > -		if (err) {
> > -			xdp_rxq_info_unreg(&bdr->xdp.rxq);
> > -			kfree(v);
> > -			goto fail;
> > -		}
> > -
> > -		/* init defaults for adaptive IC */
> > -		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
> > -			v->rx_ictt = 0x1;
> > -			v->rx_dim_en = true;
> > -		}
> > -		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
> > -		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
> > -		v->count_tx_rings = v_tx_rings;
> > -
> > -		for (j = 0; j < v_tx_rings; j++) {
> > -			int idx;
> > -
> > -			/* default tx ring mapping policy */
> > -			idx = priv->bdr_int_num * j + i;
> > -			__set_bit(idx, &v->tx_rings_map);
> > -			bdr = &v->tx_ring[j];
> > -			bdr->index = idx;
> > -			bdr->ndev = priv->ndev;
> > -			bdr->dev = priv->dev;
> > -			bdr->bd_count = priv->tx_bd_count;
> > -			priv->tx_ring[idx] = bdr;
> > -		}
> > -	}
> >
> >  	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
> >
> > @@ -3062,16 +3088,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
> >  	return 0;
> >
> >  fail:
> > -	while (i--) {
> > -		struct enetc_int_vector *v = priv->int_vector[i];
> > -		struct enetc_bdr *rx_ring = &v->rx_ring;
> > -
> > -		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
> > -		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
> > -		netif_napi_del(&v->napi);
> > -		cancel_work_sync(&v->rx_dim.work);
> > -		kfree(v);
> > -	}
> > +	while (i--)
> > +		enetc_int_vector_destroy(priv, i);
> >
> >  	pci_free_irq_vectors(pdev);
> >
> > @@ -3083,26 +3101,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
> >  {
> >  	int i;
> >
> > -	for (i = 0; i < priv->bdr_int_num; i++) {
> > -		struct enetc_int_vector *v = priv->int_vector[i];
> > -		struct enetc_bdr *rx_ring = &v->rx_ring;
> > -
> > -		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
> > -		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
> > -		netif_napi_del(&v->napi);
> > -		cancel_work_sync(&v->rx_dim.work);
> > -	}
> > -
> > -	for (i = 0; i < priv->num_rx_rings; i++)
> > -		priv->rx_ring[i] = NULL;
> > -
> > -	for (i = 0; i < priv->num_tx_rings; i++)
> > -		priv->tx_ring[i] = NULL;
> > -
> > -	for (i = 0; i < priv->bdr_int_num; i++) {
> > -		kfree(priv->int_vector[i]);
> > -		priv->int_vector[i] = NULL;
> > -	}
> > +	for (i = 0; i < priv->bdr_int_num; i++)
> > +		enetc_int_vector_destroy(priv, i);
> >
> >  	/* disable all MSIX for this device */
> >  	pci_free_irq_vectors(priv->si->pdev);
> > --
> > 2.34.1
> >