On Thu, Feb 27, 2025 at 04:12:04AM +0000, Mina Almasry wrote: > int mp_dmabuf_devmem_init(struct page_pool *pool) > diff --git a/net/core/devmem.h b/net/core/devmem.h > index 946f2e015746..67168aae5e5b 100644 > --- a/net/core/devmem.h > +++ b/net/core/devmem.h > @@ -23,8 +23,9 @@ struct net_devmem_dmabuf_binding { > > /* The user holds a ref (via the netlink API) for as long as they want > * the binding to remain alive. Each page pool using this binding holds > - * a ref to keep the binding alive. Each allocated net_iov holds a > - * ref. > + * a ref to keep the binding alive. The page_pool does not release the > + * ref until all the net_iovs allocated from this binding are released > + * back to the page_pool. > * > * The binding undos itself and unmaps the underlying dmabuf once all > * those refs are dropped and the binding is no longer desired or in > @@ -32,7 +33,10 @@ struct net_devmem_dmabuf_binding { > * > * net_devmem_get_net_iov() on dmabuf net_iovs will increment this > * reference, making sure that the binding remains alive until all the > - * net_iovs are no longer used. > + * net_iovs are no longer used. net_iovs allocated from this binding > + * that are stuck in the TX path for any reason (such as awaiting > + * retransmits) hold a reference to the binding until the skb holding > + * them is freed. > */ > refcount_t ref; > > @@ -48,6 +52,14 @@ struct net_devmem_dmabuf_binding { > * active. > */ > u32 id; > + > + /* Array of net_iov pointers for this binding, sorted by virtual > + * address. This array is convenient to map the virtual addresses to > + * net_iovs in the TX path. 
> + */ > + struct net_iov **tx_vec; > + > + struct work_struct unbind_w; > }; > > #if defined(CONFIG_NET_DEVMEM) > @@ -64,14 +76,17 @@ struct dmabuf_genpool_chunk_owner { > dma_addr_t base_dma_addr; > }; > > -void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding); > +void __net_devmem_dmabuf_binding_free(struct work_struct *wq); > struct net_devmem_dmabuf_binding * > -net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, > - struct netlink_ext_ack *extack); > +net_devmem_bind_dmabuf(struct net_device *dev, > + enum dma_data_direction direction, > + unsigned int dmabuf_fd, struct netlink_ext_ack *extack); > +struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id); > void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding); > int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, > struct net_devmem_dmabuf_binding *binding, > struct netlink_ext_ack *extack); > +void net_devmem_bind_tx_release(struct sock *sk); > > static inline struct dmabuf_genpool_chunk_owner * > net_devmem_iov_to_chunk_owner(const struct net_iov *niov) > @@ -100,10 +115,10 @@ static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov) > ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT); > } > > -static inline void > +static inline bool > net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding) > { > - refcount_inc(&binding->ref); > + return refcount_inc_not_zero(&binding->ref); > } > > static inline void > @@ -112,7 +127,8 @@ net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding) > if (!refcount_dec_and_test(&binding->ref)) > return; > > - __net_devmem_dmabuf_binding_free(binding); > + INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free); > + schedule_work(&binding->unbind_w); > } > > void net_devmem_get_net_iov(struct net_iov *niov); > @@ -123,6 +139,11 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding); > void 
net_devmem_free_dmabuf(struct net_iov *ppiov); > > bool net_is_devmem_iov(struct net_iov *niov); > +struct net_devmem_dmabuf_binding * > +net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id); > +struct net_iov * > +net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr, > + size_t *off, size_t *size); > > #else > struct net_devmem_dmabuf_binding; > @@ -140,18 +161,23 @@ static inline void net_devmem_put_net_iov(struct net_iov *niov) > { > } > > -static inline void > -__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding) > +static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq) > { > } > > static inline struct net_devmem_dmabuf_binding * > net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, > + enum dma_data_direction direction, > struct netlink_ext_ack *extack) The argument order here differs from the declaration above (and from the definition in net/core/devmem.c), which could cause a build failure when CONFIG_NET_DEVMEM=n. I think it should instead be: static inline struct net_devmem_dmabuf_binding * net_devmem_bind_dmabuf(struct net_device *dev, + enum dma_data_direction direction, + unsigned int dmabuf_fd, struct netlink_ext_ack *extack) > { > return ERR_PTR(-EOPNOTSUPP); > } > Thanks, Praan