From: Geetha sowjanya <gakula@xxxxxxxxxxx>

When XDP buffers come from the page pool, do not DMA unmap them in the
driver; DMA map/unmap is handled by the page_pool APIs.

Signed-off-by: Geetha sowjanya <gakula@xxxxxxxxxxx>
Signed-off-by: Suman Ghosh <sumang@xxxxxxxxxxx>
---
 .../marvell/octeontx2/nic/otx2_common.h       |  4 +-
 .../ethernet/marvell/octeontx2/nic/otx2_pf.c  |  8 +++-
 .../marvell/octeontx2/nic/otx2_txrx.c         | 42 ++++++++++++-------
 .../marvell/octeontx2/nic/otx2_txrx.h         |  1 +
 4 files changed, 37 insertions(+), 18 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 65814e3dc93f..951fdf6bc2c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
 #include <linux/time64.h>
 #include <linux/dim.h>
 #include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
 
 #include <mbox.h>
 #include <npc.h>
@@ -1094,7 +1095,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+			    u16 qidx, u16 flags);
 u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
 int otx2_handle_ntuple_tc_features(struct net_device *netdev,
 				   netdev_features_t features);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1dde93e8af8..8ba44164736a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2701,11 +2701,15 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
 	if (dma_mapping_error(pf->dev, dma_addr))
 		return -ENOMEM;
 
-	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len,
+				     qidx, XDP_REDIRECT);
 	if (!err) {
 		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
 		page = virt_to_page(xdpf->data);
-		put_page(page);
+		if (page->pp)
+			page_pool_recycle_direct(page->pp, page);
+		else
+			put_page(page);
 		return -ENOMEM;
 	}
 	return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index ed8b37eb2054..730f2b7742db 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -101,19 +101,20 @@ static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
 	struct nix_send_comp_s *snd_comp = &cqe->comp;
 	struct sg_list *sg;
 	struct page *page;
-	u64 pa;
+	u64 pa, iova;
 
 	sg = &sq->sg[snd_comp->sqe_id];
 
-	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
-	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
-			    sg->size[0], DMA_TO_DEVICE);
+	iova = sg->dma_addr[0] - OTX2_HEAD_ROOM;
+	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
 	page = virt_to_page(phys_to_virt(pa));
+	if (sg->flags & XDP_REDIRECT)
+		otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+
 	if (page->pp) {
 		page_pool_recycle_direct(page->pp, page);
 		return;
 	}
-
 	put_page(page);
 }
 
@@ -1365,7 +1366,7 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
 }
 
 static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
-				int len, int *offset)
+				int len, int *offset, u16 flags)
 {
 	struct nix_sqe_sg_s *sg = NULL;
 	u64 *iova = NULL;
@@ -1382,9 +1383,11 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
 	sq->sg[sq->head].dma_addr[0] = dma_addr;
 	sq->sg[sq->head].size[0] = len;
 	sq->sg[sq->head].num_segs = 1;
+	sq->sg[sq->head].flags = flags;
 }
 
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+			    u16 qidx, u16 flags)
 {
 	struct nix_sqe_hdr_s *sqe_hdr;
 	struct otx2_snd_queue *sq;
@@ -1410,7 +1413,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
 
 	offset = sizeof(*sqe_hdr);
 
-	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+	otx2_xdp_sqe_add_sg(sq, iova, len, &offset, flags);
 	sqe_hdr->sizem1 = (offset / 16) - 1;
 	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
 
@@ -1451,19 +1454,24 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 	case XDP_TX:
 		qidx += pfvf->hw.tx_queues;
 		cq->pool_ptrs++;
-		return otx2_xdp_sq_append_pkt(pfvf, iova,
-					      cqe->sg.seg_size, qidx);
+		return otx2_xdp_sq_append_pkt(pfvf, cqe->sg.seg_addr,
+					      cqe->sg.seg_size, qidx, XDP_TX);
 	case XDP_REDIRECT:
 		cq->pool_ptrs++;
 		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
 
-		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
-				    DMA_FROM_DEVICE);
 		if (!err) {
 			*need_xdp_flush = true;
 			return true;
 		}
-		page_pool_recycle_direct(pool->page_pool, page);
+
+		if (page->pp) {
+			page_pool_recycle_direct(pool->page_pool, page);
+			return false;
+		}
+
+		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+				    DMA_FROM_DEVICE);
+		put_page(page);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
@@ -1472,10 +1480,14 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 		trace_xdp_exception(pfvf->netdev, prog, act);
 		break;
 	case XDP_DROP:
+		cq->pool_ptrs++;
+		if (page->pp) {
+			page_pool_recycle_direct(pool->page_pool, page);
+			return true;
+		}
 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
 				    DMA_FROM_DEVICE);
-		page_pool_recycle_direct(pool->page_pool, page);
-		cq->pool_ptrs++;
+		put_page(page);
 		return true;
 	}
 	return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index d23810963fdb..92e1e84cad75 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -76,6 +76,7 @@ struct otx2_rcv_queue {
 
 struct sg_list {
 	u16 num_segs;
+	u16 flags;
 	u64 skb;
 	u64 size[OTX2_MAX_FRAGS_IN_SQE];
 	u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
-- 
2.25.1
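
For context, the buffer-release rule that the patch spreads across
otx2_xdp_snd_pkt_handler(), otx2_xdp_xmit_tx() and
otx2_xdp_rcv_pkt_handler() boils down to the sketch below. This is a
minimal illustration, not code the patch adds: otx2_xdp_free_buf() and
its driver_mapped parameter are made-up names, and only helpers that
already appear in the diff are assumed.

	/* Release an XDP buffer according to who owns its DMA mapping.
	 *
	 * Page pool pages (page->pp != NULL) were mapped by the pool at
	 * allocation time and keep their mapping across recycles, so the
	 * driver must not unmap them.  Pages the driver mapped itself
	 * must be unmapped before the last reference is dropped.
	 */
	static void otx2_xdp_free_buf(struct otx2_nic *pfvf, struct page *page,
				      u64 iova, int size, bool driver_mapped)
	{
		if (driver_mapped)
			otx2_dma_unmap_page(pfvf, iova, size, DMA_TO_DEVICE);

		if (page->pp)
			page_pool_recycle_direct(page->pp, page);
		else
			put_page(page);
	}

In the patch itself, the new sg_list::flags field carries the
"driver mapped" fact from submit time to completion time: XDP_REDIRECT
frames are dma_map'ed in otx2_xdp_xmit_tx(), while XDP_TX frames reuse
the pool-mapped rx buffer. That is why otx2_xdp_sq_append_pkt() grows a
flags argument and why otx2_xdp_snd_pkt_handler() only unmaps when
sg->flags & XDP_REDIRECT is set.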