>>
>> -	pp_params.order = get_order(buf_size);
>> -	pp_params.flags = PP_FLAG_DMA_MAP;
>> -	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
>> -	pp_params.nid = NUMA_NO_NODE;
>> -	pp_params.dev = pfvf->dev;
>> -	pp_params.dma_dir = DMA_FROM_DEVICE;
>> -	pool->page_pool = page_pool_create(&pp_params);
>> -	if (IS_ERR(pool->page_pool)) {
>> -		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
>> -		return PTR_ERR(pool->page_pool);
>> +	/* Set XSK pool to support AF_XDP zero-copy */
>> +	xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
>> +	if (xsk_pool) {
>> +		pool->xsk_pool = xsk_pool;
>> +		pool->xdp_cnt = numptrs;
>> +		pool->xdp = devm_kcalloc(pfvf->dev,
>> +					 numptrs, sizeof(struct xdp_buff *),
>> +					 GFP_KERNEL);
>
> What is the rationale behind having a buffer pool within your driver
> while you have this very same thing within xsk_buff_pool?
>
> You're doubling your work. Just use xsk_buff_alloc_batch() and have a
> simpler ZC implementation in your driver.

[Suman] This series is the initial change; we will use the batch API in
follow-up patches.

>> +		if (IS_ERR(pool->xdp)) {
>> +			netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
>> +			return PTR_ERR(pool->xdp);
>> +		}
>>  	}
>>
>>  	return 0;
>> @@ -1543,9 +1578,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
>>  	}
>>
>>  	for (ptr = 0; ptr < num_sqbs; ptr++) {
>> -		err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
>> -		if (err)
>> +		err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
>> +		if (err) {
>> +			if (pool->xsk_pool) {
>> +				ptr--;
>> +				while (ptr >= 0) {
>> +					xsk_buff_free(pool->xdp[ptr]);
>> +					ptr--;
>> +				}
>> +			}
>>  			goto err_mem;
>> +		}
>> +
>>  		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
>>  		sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
>>  	}
>> @@ -1595,11 +1639,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
>>  	/* Allocate pointers and free them to aura/pool */
>>  	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
>>  		pool = &pfvf->qset.pool[pool_id];
>> +
>>  		for (ptr = 0; ptr < num_ptrs; ptr++) {
>> -			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
>> -			if (err)
>> +			err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
>> +			if (err) {
>> +				if (pool->xsk_pool) {
>> +					while (ptr)
>> +						xsk_buff_free(pool->xdp[--ptr]);
>> +				}
>>  				return -ENOMEM;
>> +			}
>> +
>>  			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
>> +						   pool->xsk_pool ? bufptr :
>>  						   bufptr + OTX2_HEAD_ROOM);
>>  		}
>>  	}
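As a reference for the batch API suggested above, here is a minimal
sketch of how the RQ fill loop could look with xsk_buff_alloc_batch().
This is illustrative only: num_ptrs, pool_id, ptr and pool->xdp come
from the hunks above, and unwinding a partially filled batch on failure
is left out.

	/* Grab up to num_ptrs buffers from the XSK pool in one call
	 * instead of invoking otx2_alloc_rbuf() once per pointer.
	 */
	u32 got = xsk_buff_alloc_batch(pool->xsk_pool, pool->xdp, num_ptrs);

	for (ptr = 0; ptr < got; ptr++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(pool->xdp[ptr]);

		/* Hand the frame address straight to the aura/pool HW */
		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, dma);
	}
	if (got < num_ptrs)
		return -ENOMEM;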
>> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
>> index d5fbccb289df..60508971b62f 100644
>> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
>> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
>> @@ -532,6 +532,8 @@ struct otx2_nic {
>>
>>  	/* Inline ipsec */
>>  	struct cn10k_ipsec ipsec;
>> +	/* af_xdp zero-copy */
>> +	unsigned long *af_xdp_zc_qidx;
>>  };
>>
>>  static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
>> @@ -1003,7 +1005,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
>>  void otx2_free_pending_sqe(struct otx2_nic *pfvf);
>>  void otx2_sqb_flush(struct otx2_nic *pfvf);
>>  int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
>> -		    dma_addr_t *dma);
>> +		    dma_addr_t *dma, int qidx, int idx);
>>  int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
>>  void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
>>  int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
>> @@ -1033,6 +1035,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
>>  void otx2_disable_mbox_intr(struct otx2_nic *pf);
>>  void otx2_disable_napi(struct otx2_nic *pf);
>>  irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
>> +int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
>> +int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
>>
>>  /* RSS configuration APIs*/
>>  int otx2_rss_init(struct otx2_nic *pfvf);
>> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
>> index 4347a3c95350..50a42cd5d50a 100644
>> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
>> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
>> @@ -27,6 +27,7 @@
>>  #include "qos.h"
>>  #include <rvu_trace.h>
>>  #include "cn10k_ipsec.h"
>> +#include "otx2_xsk.h"
>>
>>  #define DRV_NAME	"rvu_nicpf"
>>  #define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
>> @@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
>>  	struct nix_lf_free_req *free_req;
>>  	struct mbox *mbox = &pf->mbox;
>>  	struct otx2_cq_queue *cq;
>> -	struct otx2_pool *pool;
>>  	struct msg_req *req;
>> -	int pool_id;
>>  	int qidx;
>>
>>  	/* Ensure all SQE are processed */
>> @@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
>>  	/* Free RQ buffer pointers*/
>>  	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
>>
>> -	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
>> -		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
>> -		pool = &pf->qset.pool[pool_id];
>> -		page_pool_destroy(pool->page_pool);
>> -		pool->page_pool = NULL;
>> -	}
>> -
>>  	otx2_free_cq_res(pf);
>>
>>  	/* Free all ingress bandwidth profiles allocated */
>> @@ -2788,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
>>  	switch (xdp->command) {
>>  	case XDP_SETUP_PROG:
>>  		return otx2_xdp_setup(pf, xdp->prog);
>> +	case XDP_SETUP_XSK_POOL:
>> +		return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
>>  	default:
>>  		return -EINVAL;
>>  	}
>> @@ -2865,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
>>  	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
>>  	.ndo_get_vf_config	= otx2_get_vf_config,
>>  	.ndo_bpf		= otx2_xdp,
>> +	.ndo_xsk_wakeup		= otx2_xsk_wakeup,
>>  	.ndo_xdp_xmit		= otx2_xdp_xmit,
>>  	.ndo_setup_tc		= otx2_setup_tc,
>>  	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
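otx2_xsk_pool_setup(), wired up in the XDP_SETUP_XSK_POOL case above, is
not shown in this excerpt; a typical shape for such an entry point, with
hypothetical otx2_xsk_pool_enable()/otx2_xsk_pool_disable() helpers, is:

	/* A non-NULL pool attaches an XSK buffer pool to the queue
	 * (enable zero-copy); a NULL pool detaches it again.
	 */
	static int otx2_xsk_pool_setup(struct otx2_nic *pf,
				       struct xsk_buff_pool *pool, u16 qidx)
	{
		if (pool)
			return otx2_xsk_pool_enable(pf, pool, qidx);

		return otx2_xsk_pool_disable(pf, qidx);
	}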
>> @@ -3203,16 +3198,26 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>>  	/* Enable link notifications */
>>  	otx2_cgx_config_linkevents(pf, true);
>>
>> +	pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
>
> If this is taken from the ice driver, then be aware we got rid of the
> bitmap tracking ZC-enabled queues; see adbf5a42341f ("ice: remove
> af_xdp_zc_qps bitmap").
>
> In case you still have a need for it after going through the referenced
> commit, please provide us some justification why.

[Suman] Thanks for pointing it out. I will check if it can be avoided.
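If the bitmap does turn out to be avoidable, the referenced ice commit
derives the zero-copy state on demand instead of tracking it. A sketch
of the equivalent check for this driver (hypothetical helper, not part
of the patch):

	/* A queue is in AF_XDP zero-copy mode exactly when an XSK pool
	 * is bound to it, so the state need not be tracked separately.
	 */
	static inline bool otx2_xsk_zc_enabled(struct otx2_nic *pfvf, u16 qidx)
	{
		return !!xsk_get_pool_from_qid(pfvf->netdev, qidx);
	}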