Re: [PATCH net-next v3 7/7] bnxt_en: add support for device memory tcp

On Tue, Oct 8, 2024 at 11:45 AM David Wei <dw@xxxxxxxxxxx> wrote:
>

Hi David,
Thanks a lot for your review!

> On 2024-10-03 09:06, Taehee Yoo wrote:
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > index 872b15842b11..64e07d247f97 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > @@ -55,6 +55,7 @@
> >  #include <net/page_pool/helpers.h>
> >  #include <linux/align.h>
> >  #include <net/netdev_queues.h>
> > +#include <net/netdev_rx_queue.h>
> >
> >  #include "bnxt_hsi.h"
> >  #include "bnxt.h"
> > @@ -863,6 +864,22 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
> >               bnapi->events &= ~BNXT_TX_CMP_EVENT;
> >  }
> >
> > +static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
> > +                                      struct bnxt_rx_ring_info *rxr,
> > +                                      unsigned int *offset,
> > +                                      gfp_t gfp)
>
> gfp is unused

I will remove the unnecessary gfp parameter in v4.

>
> > +{
> > +     netmem_ref netmem;
> > +
> > +     netmem = page_pool_alloc_netmem(rxr->page_pool, GFP_ATOMIC);
> > +     if (!netmem)
> > +             return 0;
> > +     *offset = 0;
> > +
> > +     *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
>
> offset is always 0

Okay, I will remove this too in v4.
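
Something like this is what I have in mind for v4 (rough sketch only;
the callers will be updated to match, and the body otherwise stays as
in this patch):

static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr)
{
	netmem_ref netmem;

	netmem = page_pool_alloc_netmem(rxr->page_pool, GFP_ATOMIC);
	if (!netmem)
		return 0;

	/* offset is always 0, so fold it into the mapping directly */
	*mapping = page_pool_get_dma_addr_netmem(netmem);
	return netmem;
}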

>
> > +     return netmem;
> > +}
> > +
> >  static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
> >                                        struct bnxt_rx_ring_info *rxr,
> >                                        unsigned int *offset,
>
> [...]
>
> > @@ -1192,6 +1209,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
> >
> >  static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> >                              struct bnxt_cp_ring_info *cpr,
> > +                            struct sk_buff *skb,
> >                              struct skb_shared_info *shinfo,
> >                              u16 idx, u32 agg_bufs, bool tpa,
> >                              struct xdp_buff *xdp)
> > @@ -1211,7 +1229,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> >               u16 cons, frag_len;
> >               struct rx_agg_cmp *agg;
> >               struct bnxt_sw_rx_agg_bd *cons_rx_buf;
> > -             struct page *page;
> > +             netmem_ref netmem;
> >               dma_addr_t mapping;
> >
> >               if (p5_tpa)
> > @@ -1223,9 +1241,15 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
> >                           RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
> >
> >               cons_rx_buf = &rxr->rx_agg_ring[cons];
> > -             skb_frag_fill_page_desc(frag, cons_rx_buf->page,
> > -                                     cons_rx_buf->offset, frag_len);
> > -             shinfo->nr_frags = i + 1;
> > +             if (skb) {
> > +                     skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
> > +                                            cons_rx_buf->offset, frag_len,
> > +                                            BNXT_RX_PAGE_SIZE);
> > +             } else {
> > +                     skb_frag_fill_page_desc(frag, netmem_to_page(cons_rx_buf->netmem),
> > +                                             cons_rx_buf->offset, frag_len);
> > +                     shinfo->nr_frags = i + 1;
> > +             }
>
> I feel like this function needs a refactor at some point to split out
> the skb and xdp paths.

Okay, I will add a __bnxt_rx_agg_netmem() helper in the v4 patch.
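
Roughly, the idea is to move the skb-path frag handling out of the
shared loop so the skb and xdp cases no longer share the branch above.
The name and exact arguments below are only a placeholder sketch, not
the final v4 code:

static void __bnxt_rx_agg_netmem(struct sk_buff *skb, int i,
				 struct bnxt_sw_rx_agg_bd *cons_rx_buf,
				 u16 frag_len)
{
	/* skb path: attach the agg buffer as a netmem frag */
	skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
			       cons_rx_buf->offset, frag_len,
			       BNXT_RX_PAGE_SIZE);
}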

>
> >               __clear_bit(cons, rxr->rx_agg_bmap);
> >
> >               /* It is possible for bnxt_alloc_rx_page() to allocate
>
> [...]
>
> > @@ -3608,9 +3629,11 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
> >
> >  static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
> >                                  struct bnxt_rx_ring_info *rxr,
> > +                                int queue_idx,
>
> To save a parameter, the index is available already in rxr->bnapi->index

Okay, I will also remove the queue_idx parameter in v4.
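
Then bnxt_alloc_rx_page_pool() can derive the index itself. A sketch of
what I mean, assuming rxr->bnapi is already set up by the time the pool
is created (as in the current allocation order):

	unsigned int queue_idx = rxr->bnapi->index;
	struct netdev_rx_queue *rxq;

	rxq = __netif_get_rx_queue(bp->dev, queue_idx);
	if (rxq->mp_params.mp_priv)
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	else
		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp.queue_idx = queue_idx;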

>
> >                                  int numa_node)
> >  {
> >       struct page_pool_params pp = { 0 };
> > +     struct netdev_rx_queue *rxq;
> >
> >       pp.pool_size = bp->rx_agg_ring_size;
> >       if (BNXT_RX_PAGE_MODE(bp))
> > @@ -3621,8 +3644,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
> >       pp.dev = &bp->pdev->dev;
> >       pp.dma_dir = bp->rx_dir;
> >       pp.max_len = PAGE_SIZE;
> > -     pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
> > +     pp.order = 0;
> > +
> > +     rxq = __netif_get_rx_queue(bp->dev, queue_idx);
> > +     if (rxq->mp_params.mp_priv)
> > +             pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM;
> > +     else
> > +             pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
> >
> > +     pp.queue_idx = queue_idx;
> >       rxr->page_pool = page_pool_create(&pp);
> >       if (IS_ERR(rxr->page_pool)) {
> >               int err = PTR_ERR(rxr->page_pool);
> > @@ -3655,7 +3685,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
> >               cpu_node = cpu_to_node(cpu);
> >               netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
> >                          i, cpu_node);
> > -             rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
> > +             rc = bnxt_alloc_rx_page_pool(bp, rxr, i, cpu_node);
> >               if (rc)
> >                       return rc;
> >

Thanks a lot for catching these issues.
I will send v4 if there are no problems after some tests.

Thanks!
Taehee Yoo
