> +			io_zcrx_return_niov(niov);

The last 5 lines or so are basically doing what page_pool_put_netmem()
does, except there is a pp != niov->pp check in the middle. Can we call
page_pool_put_netmem() directly if pp != niov->pp? It would reduce the
code duplication a bit and reduce the amount of custom reffing code we
need to add for this mp.

> +			continue;
> +		}
> +
> +		page_pool_mp_return_in_cache(pp, netmem);

So if niov->pp != pp, we end up basically doing a
page_pool_put_netmem(), which is the 'correct' way to return a netmem
to the page_pool, or at least is the way all the other memory types
(devmem, pages) return a netmem.

However, if niov->pp == pp, we end up calling
page_pool_mp_return_in_cache(), which is basically the same as
page_pool_put_unrefed_netmem() but skips the ptr_ring, so it's slightly
faster with a bit less overhead.

I would honestly elect to call page_pool_put_netmem() regardless of the
niov->pp/pp check. Sure, it would add a bit more overhead than the code
here, but it would reduce the custom pp reffing code we need to add for
this mp, and it would replenish the ptr_ring in both cases, which may
even end up faster by reducing the number of times we need to
replenish. We can always add micro-optimizations like skipping the
ptr_ring later if there is evidence of a significant perf improvement.
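Roughly what I have in mind, as a completely untested sketch (eliding
the rqe parsing and uref handling the patch already does earlier in the
loop body):

	do {
		struct net_iov *niov;
		netmem_ref netmem;

		/* ... rqe parsing and uref handling as in the patch ... */

		netmem = net_iov_to_netmem(niov);

		/*
		 * One return path whether or not niov->pp == pp: drop the
		 * pp ref and, if it was the last one, recycle through the
		 * ptr_ring, the same way devmem and pages return netmems.
		 */
		page_pool_put_netmem(niov->pp, netmem, -1, false);
	} while (--entries);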
> +	} while (--entries);
> +
> +	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
> +	spin_unlock_bh(&ifq->rq_lock);
> +}
> +
> +static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
> +{
> +	struct io_zcrx_area *area = ifq->area;
> +
> +	spin_lock_bh(&area->freelist_lock);

I assume if you have 1 area serving many rx queues then you start
contending on this lock, no? If you find it so in the future, genpool
may help.

> +	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
> +		struct net_iov *niov = __io_zcrx_get_free_niov(area);
> +		netmem_ref netmem = net_iov_to_netmem(niov);
> +
> +		page_pool_set_pp_info(pp, netmem);
> +		page_pool_mp_return_in_cache(pp, netmem);
> +
> +		pp->pages_state_hold_cnt++;
> +		trace_page_pool_state_hold(pp, netmem, pp->pages_state_hold_cnt);
> +	}
> +	spin_unlock_bh(&area->freelist_lock);
> +}
> +
> +static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
> +{
> +	struct io_zcrx_ifq *ifq = pp->mp_priv;
> +
> +	/* pp should already be ensuring that */
> +	if (unlikely(pp->alloc.count))
> +		goto out_return;
> +
> +	io_zcrx_ring_refill(pp, ifq);
> +	if (likely(pp->alloc.count))
> +		goto out_return;
> +
> +	io_zcrx_refill_slow(pp, ifq);
> +	if (!pp->alloc.count)
> +		return 0;
> +out_return:
> +	return pp->alloc.cache[--pp->alloc.count];
> +}
> +
> +static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
> +{
> +	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
> +		return false;
> +
> +	if (page_pool_unref_netmem(netmem, 1) == 0)

Check is redundant, AFAICT. pp would never release a netmem unless the
pp refcount is 1.

> +		io_zcrx_return_niov_freelist(netmem_to_net_iov(netmem));
> +	return false;
>  }
> +
> +static int io_pp_zc_init(struct page_pool *pp)
> +{
> +	struct io_zcrx_ifq *ifq = pp->mp_priv;
> +
> +	if (WARN_ON_ONCE(!ifq))
> +		return -EINVAL;
> +	if (WARN_ON_ONCE(ifq->dev != pp->slow.netdev))
> +		return -EINVAL;
> +	if (pp->dma_map)
> +		return -EOPNOTSUPP;
> +	if (pp->p.order != 0)
> +		return -EOPNOTSUPP;
> +	if (pp->p.dma_dir != DMA_FROM_DEVICE)
> +		return -EOPNOTSUPP;
> +
> +	percpu_ref_get(&ifq->ctx->refs);
> +	return 0;
> +}
> +
> +static void io_pp_zc_destroy(struct page_pool *pp)
> +{
> +	struct io_zcrx_ifq *ifq = pp->mp_priv;
> +	struct io_zcrx_area *area = ifq->area;
> +
> +	if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs))
> +		return;
> +	percpu_ref_put(&ifq->ctx->refs);
> +}
> +
> +static int io_pp_nl_report(const struct page_pool *pool, struct sk_buff *rsp)
> +{
> +	return nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IO_URING, 0);
> +}
> +
> +static const struct memory_provider_ops io_uring_pp_zc_ops = {
> +	.alloc_netmems		= io_pp_zc_alloc_netmems,
> +	.release_netmem		= io_pp_zc_release_netmem,
> +	.init			= io_pp_zc_init,
> +	.destroy		= io_pp_zc_destroy,
> +	.nl_report		= io_pp_nl_report,
> +};
> diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
> index 46988a1dbd54..beacf1ea6380 100644
> --- a/io_uring/zcrx.h
> +++ b/io_uring/zcrx.h
> @@ -9,7 +9,9 @@
>  struct io_zcrx_area {
>  	struct net_iov_area	nia;
>  	struct io_zcrx_ifq	*ifq;
> +	atomic_t		*user_refs;
>
> +	bool			is_mapped;
>  	u16			area_id;
>  	struct page		**pages;
>
> @@ -26,6 +28,8 @@ struct io_zcrx_ifq {
>  	struct io_uring			*rq_ring;
>  	struct io_uring_zcrx_rqe	*rqes;
>  	u32				rq_entries;
> +	u32				cached_rq_head;
> +	spinlock_t			rq_lock;
>
>  	u32				if_rxq;
>  	struct net_device		*dev;
> --
> 2.43.5
>

--
Thanks,
Mina