On Fri, 2022-09-23 at 18:45 +0300, Ioana Ciornei wrote:
> From: Robert-Ionut Alexa <robert-ionut.alexa@xxxxxxx>
>
> This patch allows the configuration of multiple buffer pools associated
> with a single DPNI object, each distinct DPBP object not necessarily
> shared among all queues.
> The user can interogate both the number of buffer pools and the buffer
> count in each buffer pool by using the .get_ethtool_stats() callback.
>
> Signed-off-by: Robert-Ionut Alexa <robert-ionut.alexa@xxxxxxx>
> Signed-off-by: Ioana Ciornei <ioana.ciornei@xxxxxxx>
> ---
> Changes in v2:
>  - Export dpaa2_eth_allocate_dpbp/dpaa2_eth_free_dpbp in this patch to
>    avoid a build warning. The functions will be used in next patches.
>
>  .../net/ethernet/freescale/dpaa2/dpaa2-eth.c  | 188 ++++++++++++------
>  .../net/ethernet/freescale/dpaa2/dpaa2-eth.h  |  26 ++-
>  .../ethernet/freescale/dpaa2/dpaa2-ethtool.c  |  15 +-
>  3 files changed, 162 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
> index 75d51572693d..aa93ed339b63 100644
> --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
> +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
> @@ -1,6 +1,6 @@
>  // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
>  /* Copyright 2014-2016 Freescale Semiconductor Inc.
> - * Copyright 2016-2020 NXP
> + * Copyright 2016-2022 NXP
>   */
>  #include <linux/init.h>
>  #include <linux/module.h>
> @@ -304,7 +304,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
>          if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
>                  return;
>
> -        while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
> +        while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
>                                                 ch->recycled_bufs,
>                                                 ch->recycled_bufs_cnt)) == -EBUSY) {
>                  if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
> @@ -1631,7 +1631,7 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
>   * to the specified buffer pool
>   */
>  static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
> -                              struct dpaa2_eth_channel *ch, u16 bpid)
> +                              struct dpaa2_eth_channel *ch)
>  {
>          struct device *dev = priv->net_dev->dev.parent;
>          u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
> @@ -1663,12 +1663,12 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
>                  trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
>                                           DPAA2_ETH_RX_BUF_RAW_SIZE,
>                                           addr, priv->rx_buf_size,
> -                                         bpid);
> +                                         ch->bp->bpid);
>          }
>
> release_bufs:
>          /* In case the portal is busy, retry until successful */
> -        while ((err = dpaa2_io_service_release(ch->dpio, bpid,
> +        while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
>                                                 buf_array, i)) == -EBUSY) {
>                  if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
>                          break;
> @@ -1697,39 +1697,59 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
>          return 0;
>  }
>
> -static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
> +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
> +                               struct dpaa2_eth_channel *ch)
>  {
> -        int i, j;
> +        int i;
>          int new_count;
>
> -        for (j = 0; j < priv->num_channels; j++) {
> -                for (i = 0; i < DPAA2_ETH_NUM_BUFS;
> -                     i += DPAA2_ETH_BUFS_PER_CMD) {
> -                        new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
> -                        priv->channel[j]->buf_count += new_count;
> +        for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
> +                new_count = dpaa2_eth_add_bufs(priv, ch);
> +                ch->buf_count += new_count;
>
> -                        if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
> -                                return -ENOMEM;
> -                        }
> -                }
> +                if (new_count < DPAA2_ETH_BUFS_PER_CMD)
> +                        return -ENOMEM;
>          }
>
>          return 0;
>  }
>
> +static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
> +{
> +        struct net_device *net_dev = priv->net_dev;
> +        struct dpaa2_eth_channel *channel;
> +        int i, err = 0;
> +
> +        for (i = 0; i < priv->num_channels; i++) {
> +                channel = priv->channel[i];
> +
> +                err = dpaa2_eth_seed_pool(priv, channel);
> +
> +                /* Not much to do; the buffer pool, though not filled up,
> +                 * may still contain some buffers which would enable us
> +                 * to limp on.
> +                 */
> +                if (err)
> +                        netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
> +                                   channel->bp->dev->obj_desc.id,
> +                                   channel->bp->bpid);
> +        }
> +}
> +
>  /*
> - * Drain the specified number of buffers from the DPNI's private buffer pool.
> + * Drain the specified number of buffers from one of the DPNI's private buffer
> + * pools.
>   * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
>   */
> -static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
> +static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
> +                                 int count)
>  {
>          u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
>          int retries = 0;
>          int ret;
>
>          do {
> -                ret = dpaa2_io_service_acquire(NULL, priv->bpid,
> -                                               buf_array, count);
> +                ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
>                  if (ret < 0) {
>                          if (ret == -EBUSY &&
>                              retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
> @@ -1742,23 +1762,35 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
>          } while (ret);
>  }
>
> -static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
> +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
>  {
>          int i;
>
> -        dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
> -        dpaa2_eth_drain_bufs(priv, 1);
> +        /* Drain the buffer pool */
> +        dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
> +        dpaa2_eth_drain_bufs(priv, bpid, 1);
>
> +        /* Setup to zero the buffer count of all channels which were
> +         * using this buffer pool.
> +         */
>          for (i = 0; i < priv->num_channels; i++)
> -                priv->channel[i]->buf_count = 0;
> +                if (priv->channel[i]->bp->bpid == bpid)
> +                        priv->channel[i]->buf_count = 0;
> +}
> +
> +static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
> +{
> +        int i;
> +
> +        for (i = 0; i < priv->num_bps; i++)
> +                dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
>  }
>
>  /* Function is called from softirq context only, so we don't need to guard
>   * the access to percpu count
>   */
>  static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
> -                                 struct dpaa2_eth_channel *ch,
> -                                 u16 bpid)
> +                                 struct dpaa2_eth_channel *ch)
>  {
>          int new_count;
>
> @@ -1766,7 +1798,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
>                  return 0;
>
>          do {
> -                new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
> +                new_count = dpaa2_eth_add_bufs(priv, ch);
>                  if (unlikely(!new_count)) {
>                          /* Out of memory; abort for now, we'll try later on */
>                          break;
> @@ -1848,7 +1880,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
>                          break;
>
>                  /* Refill pool if appropriate */
> -                dpaa2_eth_refill_pool(priv, ch, priv->bpid);
> +                dpaa2_eth_refill_pool(priv, ch);
>
>                  store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
>                  if (store_cleaned <= 0)
> @@ -2047,15 +2079,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
>          struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
>          int err;
>
> -        err = dpaa2_eth_seed_pool(priv, priv->bpid);
> -        if (err) {
> -                /* Not much to do; the buffer pool, though not filled up,
> -                 * may still contain some buffers which would enable us
> -                 * to limp on.
> -                 */
> -                netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
> -                           priv->dpbp_dev->obj_desc.id, priv->bpid);
> -        }
> +        dpaa2_eth_seed_pools(priv);
>
>          if (!dpaa2_eth_is_type_phy(priv)) {
>                  /* We'll only start the txqs when the link is actually ready;
> @@ -2088,7 +2112,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
>
>  enable_err:
>          dpaa2_eth_disable_ch_napi(priv);
> -        dpaa2_eth_drain_pool(priv);
> +        dpaa2_eth_drain_pools(priv);
>          return err;
>  }
>
> @@ -2193,7 +2217,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
>          dpaa2_eth_disable_ch_napi(priv);
>
>          /* Empty the buffer pool */
> -        dpaa2_eth_drain_pool(priv);
> +        dpaa2_eth_drain_pools(priv);
>
>          /* Empty the Scatter-Gather Buffer cache */
>          dpaa2_eth_sgt_cache_drain(priv);
> @@ -3204,13 +3228,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
>          dpaa2_eth_set_fq_affinity(priv);
>  }
>
> -/* Allocate and configure one buffer pool for each interface */
> -static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
> +/* Allocate and configure a buffer pool */
> +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
>  {
> -        int err;
> -        struct fsl_mc_device *dpbp_dev;
>          struct device *dev = priv->net_dev->dev.parent;
> +        struct fsl_mc_device *dpbp_dev;
>          struct dpbp_attr dpbp_attrs;
> +        struct dpaa2_eth_bp *bp;
> +        int err;
>
>          err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
>                                       &dpbp_dev);
> @@ -3219,12 +3244,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
>                          err = -EPROBE_DEFER;
>                  else
>                          dev_err(dev, "DPBP device allocation failed\n");
> -                return err;
> +                return ERR_PTR(err);
>          }
>
> -        priv->dpbp_dev = dpbp_dev;
> +        bp = kzalloc(sizeof(*bp), GFP_KERNEL);
> +        if (!bp) {
> +                err = -ENOMEM;
> +                goto err_alloc;
> +        }

It looks like 'bp' is leaked on later error paths.

Cheers,

Paolo
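
P.S. Just to illustrate the point, here is a minimal, self-contained sketch of
the usual goto-unwind pattern that would avoid the leak. It is plain userspace
C, not driver code, and the names (fake_dpbp_dev, fake_open, allocate_bp, the
labels) are made up for the example, since the rest of the function is trimmed
above: any failure after the second allocation jumps to a label that frees it
before releasing the earlier resource.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the driver objects; purely illustrative. */
    struct fake_dpbp_dev { int id; };
    struct fake_bp { struct fake_dpbp_dev *dev; int bpid; };

    /* Pretend to probe the device; report a failure when asked to. */
    static int fake_open(struct fake_dpbp_dev *dev, int fail)
    {
            dev->id = 42;
            return fail ? -1 : 0;
    }

    static struct fake_bp *allocate_bp(int fail)
    {
            struct fake_dpbp_dev *dpbp_dev;
            struct fake_bp *bp;
            int err;

            dpbp_dev = calloc(1, sizeof(*dpbp_dev));
            if (!dpbp_dev)
                    return NULL;

            bp = calloc(1, sizeof(*bp));
            if (!bp)
                    goto err_alloc;

            err = fake_open(dpbp_dev, fail);
            if (err)
                    goto err_open;  /* without this unwind, 'bp' would leak */

            bp->dev = dpbp_dev;
            return bp;

    err_open:
            free(bp);               /* undo the later allocation first */
    err_alloc:
            free(dpbp_dev);         /* then release the earlier one */
            return NULL;
    }

    int main(void)
    {
            struct fake_bp *bp = allocate_bp(1);    /* force the error path */

            printf("allocation %s\n", bp ? "succeeded" : "failed, nothing leaked");
            if (bp) {
                    free(bp->dev);
                    free(bp);
            }
            return 0;
    }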