On Mon, 2022-12-19 at 10:55 +0100, Lorenzo Bianconi wrote: > > From: Bo Jiao <Bo.Jiao@xxxxxxxxxxxx> > > > > fix potential memory leakage, recycle rxwi when mt76_dma_add_buf() > > call fail. > > > > Signed-off-by: Bo Jiao <Bo.Jiao@xxxxxxxxxxxx> > > --- > > v2: > > - recycle rxwi when page_frag_alloc() and dma_map_single() fail. > > --- > > drivers/net/wireless/mediatek/mt76/dma.c | 27 ++++++++++++++---- > > ------ > > 1 file changed, 16 insertions(+), 11 deletions(-) > > > > diff --git a/drivers/net/wireless/mediatek/mt76/dma.c > > b/drivers/net/wireless/mediatek/mt76/dma.c > > index fc24b35..76ad47d 100644 > > --- a/drivers/net/wireless/mediatek/mt76/dma.c > > +++ b/drivers/net/wireless/mediatek/mt76/dma.c > > @@ -580,24 +580,29 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct > > mt76_queue *q) > > > > buf = page_frag_alloc(&q->rx_page, q->buf_size, > > GFP_ATOMIC); > > if (!buf) > > - break; > > + goto out; > > > > addr = dma_map_single(dev->dma_dev, buf, len, > > DMA_FROM_DEVICE); > > - if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { > > - skb_free_frag(buf); > > - break; > > - } > > + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) > > + goto free; > > > > qbuf.addr = addr + offset; > > qbuf.len = len - offset; > > qbuf.skip_unmap = false; > > - if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) > > { > > - dma_unmap_single(dev->dma_dev, addr, len, > > - DMA_FROM_DEVICE); > > - skb_free_frag(buf); > > - break; > > - } > > + if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) > > + goto umap; > > + > > frames++; > > + continue; > > + > > +umap: > > + dma_unmap_single(dev->dma_dev, addr, len, > > + DMA_FROM_DEVICE); > > +free: > > + skb_free_frag(buf); > > +out: > > + mt76_put_rxwi(dev, t); > > + break; > > } > > > > if (frames) > > Hi Bo, > > I guess in the way below, the code is more readable, what do you > think? 
> > Regards, > Lorenzo > > diff --git a/drivers/net/wireless/mediatek/mt76/dma.c > b/drivers/net/wireless/mediatek/mt76/dma.c > index fad5fe19fe18..001538f698f1 100644 > --- a/drivers/net/wireless/mediatek/mt76/dma.c > +++ b/drivers/net/wireless/mediatek/mt76/dma.c > @@ -571,13 +571,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct > mt76_queue *q) > struct mt76_queue_buf qbuf; > void *buf = NULL; > > - if ((q->flags & MT_QFLAG_WED) && > - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == > MT76_WED_Q_RX) { > - t = mt76_get_rxwi(dev); > - if (!t) > - break; > - } > - > buf = page_frag_alloc(&q->rx_page, q->buf_size, > GFP_ATOMIC); > if (!buf) > break; > @@ -588,16 +581,27 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct > mt76_queue *q) > break; > } > > + if ((q->flags & MT_QFLAG_WED) && > + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == > MT76_WED_Q_RX) { > + t = mt76_get_rxwi(dev); > + if (!t) > + goto unmap; > + } > + > qbuf.addr = addr + offset; > qbuf.len = len - offset; > qbuf.skip_unmap = false; > + > + if (!mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t)) { > + frames++; > + continue; > } > - frames++; > + > +unmap: > + dma_unmap_single(dev->dma_dev, addr, len, > DMA_FROM_DEVICE); > + skb_free_frag(buf); > + mt76_put_rxwi(dev, t); > + break; > } > > if (frames) > Hi Lore, we love your patch, but we have another patch to avoid memory fragmentation by duplicating the rx skb after mt76_dma_dequeue(). It requires mt76_get_rxwi() to be placed before page_frag_alloc(). The patch below (needs rebase) will be sent after the current patch is accepted. 
--- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -386,9 +386,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, SKB_WITH_OVERHEAD(q->buf_size), DMA_FROM_DEVICE); - buf = t->ptr; + buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); + if (!buf) + return NULL; + memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size)); t->dma_addr = 0; - t->ptr = NULL; mt76_put_rxwi(dev, t); @@ -569,6 +571,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) while (q->queued < q->ndesc - 1) { struct mt76_txwi_cache *t = NULL; struct mt76_queue_buf qbuf; + bool skip_alloc = false; void *buf = NULL; if ((q->flags & MT_QFLAG_WED) && @@ -576,11 +579,19 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) t = mt76_get_rxwi(dev); if (!t) break; + + if (t->ptr) { + skip_alloc = true; + buf = t->ptr; + } } - buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); - if (!buf) - break; + if (!skip_alloc) { + buf = page_frag_alloc(&q->rx_page, q->buf_size, + GFP_ATOMIC); + if (!buf) + break; + } addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { -- 2.18.0 > > -- > > 2.18.0 > >