From: Pavel Begunkov <asml.silence@xxxxxxxxx>

There are a bunch of places in generic paths that assume the only page
pool memory provider is devmem TCP. As we want to reuse the net_iov and
provider infrastructure, we need to patch them up and explicitly check
the provider type when we branch into devmem TCP code.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Signed-off-by: David Wei <dw@xxxxxxxxxxx>
---
 net/core/devmem.c         |  4 ++--
 net/core/page_pool_user.c | 15 +++++++++------
 net/ipv4/tcp.c            |  6 ++++++
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/net/core/devmem.c b/net/core/devmem.c
index 83d13eb441b6..b0733cf42505 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -314,10 +314,10 @@ void dev_dmabuf_uninstall(struct net_device *dev)
 	unsigned int i;
 
 	for (i = 0; i < dev->real_num_rx_queues; i++) {
-		binding = dev->_rx[i].mp_params.mp_priv;
-		if (!binding)
+		if (dev->_rx[i].mp_params.mp_ops != &dmabuf_devmem_ops)
 			continue;
 
+		binding = dev->_rx[i].mp_params.mp_priv;
 		xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
 			if (rxq == &dev->_rx[i]) {
 				xa_erase(&binding->bound_rxqs, xa_idx);
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 48335766c1bf..0d6cb7fb562c 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -214,7 +214,7 @@ static int
 page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 		  const struct genl_info *info)
 {
-	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+	struct net_devmem_dmabuf_binding *binding;
 	size_t inflight, refsz;
 	void *hdr;
 
@@ -244,8 +244,11 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 			  pool->user.detach_time))
 		goto err_cancel;
 
-	if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
-		goto err_cancel;
+	if (pool->mp_ops == &dmabuf_devmem_ops) {
+		binding = pool->mp_priv;
+		if (nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
+			goto err_cancel;
+	}
 
 	genlmsg_end(rsp, hdr);
 
@@ -353,16 +356,16 @@ void page_pool_unlist(struct page_pool *pool)
 int page_pool_check_memory_provider(struct net_device *dev,
 				    struct netdev_rx_queue *rxq)
 {
-	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+	void *mp_priv = rxq->mp_params.mp_priv;
 	struct page_pool *pool;
 	struct hlist_node *n;
 
-	if (!binding)
+	if (!mp_priv)
 		return 0;
 
 	mutex_lock(&page_pools_lock);
 	hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
-		if (pool->mp_priv != binding)
+		if (pool->mp_priv != mp_priv)
 			continue;
 
 		if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5feef46426f4..2140fa1ec9f8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -277,6 +277,7 @@
 #include <net/ip.h>
 #include <net/sock.h>
 #include <net/rstreason.h>
+#include <net/page_pool/types.h>
 
 #include <linux/uaccess.h>
 #include <asm/ioctls.h>
@@ -2475,6 +2476,11 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
 			}
 
 			niov = skb_frag_net_iov(frag);
+			if (niov->pp->mp_ops != &dmabuf_devmem_ops) {
+				err = -ENODEV;
+				goto out;
+			}
+
 			end = start + skb_frag_size(frag);
 			copy = end - offset;
-- 
2.43.5
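
For readers following along outside the tree, the pattern every hunk above
applies can be shown in isolation: never interpret mp_priv until the
provider's ops pointer has been compared against the provider you expect,
because mp_priv's type depends entirely on which provider installed it. The
userspace sketch below illustrates only that check; the struct names,
fill_dmabuf_attr() and the field layout are simplified stand-ins, not the
kernel's definitions.

#include <stdio.h>

struct memory_provider_ops {
	const char *name;
};

/* Each provider has one ops instance; its address identifies the provider. */
static const struct memory_provider_ops dmabuf_devmem_ops = { "devmem" };
static const struct memory_provider_ops other_provider_ops = { "other" };

struct pp_memory_provider_params {
	const struct memory_provider_ops *mp_ops;
	void *mp_priv;	/* provider-specific state; meaning depends on mp_ops */
};

struct devmem_binding {
	unsigned int id;
};

static void fill_dmabuf_attr(const struct pp_memory_provider_params *p)
{
	/* Wrong: casting mp_priv unconditionally would misinterpret another
	 * provider's private data. Right: branch on the ops pointer first,
	 * and only then treat mp_priv as a devmem binding. */
	if (p->mp_ops == &dmabuf_devmem_ops) {
		struct devmem_binding *binding = p->mp_priv;

		printf("devmem binding id=%u\n", binding->id);
	} else {
		printf("non-devmem provider (%s), skipping devmem-only path\n",
		       p->mp_ops ? p->mp_ops->name : "none");
	}
}

int main(void)
{
	struct devmem_binding b = { .id = 42 };
	struct pp_memory_provider_params devmem = {
		.mp_ops = &dmabuf_devmem_ops,
		.mp_priv = &b,
	};
	struct pp_memory_provider_params other = {
		.mp_ops = &other_provider_ops,
		.mp_priv = "opaque",	/* some other provider's state */
	};

	fill_dmabuf_attr(&devmem);
	fill_dmabuf_attr(&other);
	return 0;
}

The same reasoning explains the tcp.c hunk: tcp_recvmsg_dmabuf() is a
devmem-only path, so once other providers can own a net_iov it has to bail
out (-ENODEV) when the niov's page pool was not set up by dmabuf_devmem_ops.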