Patch "net: page_pool: don't try to stash the napi id" has been added to the 6.13-stable tree

This is a note to let you know that I've just added the patch titled

    net: page_pool: don't try to stash the napi id

to the 6.13-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     net-page_pool-don-t-try-to-stash-the-napi-id.patch
and it can be found in the queue-6.13 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 9b18ce4470f47a37c33709bb746e93b5c54479e0
Author: Jakub Kicinski <kuba@xxxxxxxxxx>
Date:   Thu Jan 23 15:16:20 2025 -0800

    net: page_pool: don't try to stash the napi id
    
    [ Upstream commit 67e4bb2ced0f2d8fbd414f932daea94ba63ae4c4 ]
    
    Page pool tried to cache the NAPI ID in the page pool info to
    avoid having a dependency on the life cycle of the NAPI instance.
    Since the commit under Fixes, the NAPI ID is not populated until
    napi_enable(), and there's a good chance that the page pool is
    created before NAPI gets enabled.
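    
    A minimal sketch of that problematic ordering, assuming a
    hypothetical driver (example_priv, example_open and the field
    layout are illustrative; page_pool_create() and napi_enable()
    are the real APIs):
    
      struct example_priv {
              struct napi_struct napi;
              struct page_pool *pool;
      };
    
      /* Bring-up order common in drivers: the pool is created (and,
       * before this patch, page_pool_list() snapshotted napi_id into
       * pool->user.napi_id) while the NAPI ID is still 0; the ID is
       * only assigned later, inside napi_enable().
       */
      static int example_open(struct net_device *netdev)
      {
              struct example_priv *priv = netdev_priv(netdev);
              struct page_pool_params pp_params = {
                      .pool_size = 256,
                      .napi      = &priv->napi,  /* ID not assigned yet */
              };
    
              priv->pool = page_pool_create(&pp_params);
              if (IS_ERR(priv->pool))
                      return PTR_ERR(priv->pool);
    
              napi_enable(&priv->napi);          /* napi_id set here */
              return 0;
      }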
    
    Protect the NAPI pointer with the existing page pool mutex;
    the reading path already holds it. napi_id itself needs
    READ_ONCE(): it is protected by netdev_lock(), which we are
    not holding in page pool.
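    
    The resulting read/write pairing, sketched with the symbols the
    patch below actually touches (page_pools_lock, pool->p.napi):
    
      /* Writer side (page_pool_disable_direct_recycling()): clear the
       * pointer under page_pools_lock so the netlink dump path never
       * follows a NAPI instance that is being torn down.
       */
      mutex_lock(&page_pools_lock);
      WRITE_ONCE(pool->p.napi, NULL);
      mutex_unlock(&page_pools_lock);
    
      /* Reader side (page_pool_nl_fill(), called with page_pools_lock
       * held): the pointer is stable under the mutex; napi_id itself
       * changes under netdev_lock(), hence the READ_ONCE(). IDs below
       * MIN_NAPI_ID mean "not hashed yet" and are not reported.
       */
      napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
      if (napi_id >= MIN_NAPI_ID)
              nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id);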
    
    Before this patch, NAPI IDs were missing for mlx5:
    
     # ./cli.py --spec netlink/specs/netdev.yaml --dump page-pool-get
    
     [{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912},
      {'id': 143, 'ifindex': 2, 'inflight': 5568, 'inflight-mem': 22806528},
      {'id': 142, 'ifindex': 2, 'inflight': 5120, 'inflight-mem': 20971520},
      {'id': 141, 'ifindex': 2, 'inflight': 4992, 'inflight-mem': 20447232},
      ...
    
    After:
    
     [{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912,
       'napi-id': 565},
      {'id': 143, 'ifindex': 2, 'inflight': 4224, 'inflight-mem': 17301504,
       'napi-id': 525},
      {'id': 142, 'ifindex': 2, 'inflight': 4288, 'inflight-mem': 17563648,
       'napi-id': 524},
      ...
    
    Fixes: 86e25f40aa1e ("net: napi: Add napi_config")
    Reviewed-by: Mina Almasry <almasrymina@xxxxxxxxxx>
    Reviewed-by: Toke Høiland-Jørgensen <toke@xxxxxxxxxx>
    Link: https://patch.msgid.link/20250123231620.1086401-1-kuba@xxxxxxxxxx
    Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index c022c410abe39..386efddd2aac0 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -236,7 +236,6 @@ struct page_pool {
 	struct {
 		struct hlist_node list;
 		u64 detach_time;
-		u32 napi_id;
 		u32 id;
 	} user;
 };
diff --git a/net/core/dev.c b/net/core/dev.c
index 71988f2f484b2..a994b1c725098 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6573,7 +6573,7 @@ void napi_resume_irqs(unsigned int napi_id)
 static void __napi_hash_add_with_id(struct napi_struct *napi,
 				    unsigned int napi_id)
 {
-	napi->napi_id = napi_id;
+	WRITE_ONCE(napi->napi_id, napi_id);
 	hlist_add_head_rcu(&napi->napi_hash_node,
 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 }
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f89cf93f6eb45..32570333068d8 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -1108,7 +1108,9 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
 	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
 	WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
 
+	mutex_lock(&page_pools_lock);
 	WRITE_ONCE(pool->p.napi, NULL);
+	mutex_unlock(&page_pools_lock);
 }
 EXPORT_SYMBOL(page_pool_disable_direct_recycling);
 
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 57439787b9c2b..2fb06d5f6d559 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -7,6 +7,8 @@
 
 #include "netmem_priv.h"
 
+extern struct mutex page_pools_lock;
+
 s32 page_pool_inflight(const struct page_pool *pool, bool strict);
 
 int page_pool_list(struct page_pool *pool);
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 48335766c1bfd..6677e0c2e2565 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -3,6 +3,7 @@
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/xarray.h>
+#include <net/busy_poll.h>
 #include <net/net_debug.h>
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
@@ -14,10 +15,11 @@
 #include "netdev-genl-gen.h"
 
 static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
-/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
+/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
+ *	pool->user.
  * Ordering: inside rtnl_lock
  */
-static DEFINE_MUTEX(page_pools_lock);
+DEFINE_MUTEX(page_pools_lock);
 
 /* Page pools are only reachable from user space (via netlink) if they are
  * linked to a netdev at creation time. Following page pool "visibility"
@@ -216,6 +218,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 {
 	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
 	size_t inflight, refsz;
+	unsigned int napi_id;
 	void *hdr;
 
 	hdr = genlmsg_iput(rsp, info);
@@ -229,8 +232,10 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
 			pool->slow.netdev->ifindex))
 		goto err_cancel;
-	if (pool->user.napi_id &&
-	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
+
+	napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
+	if (napi_id >= MIN_NAPI_ID &&
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
 		goto err_cancel;
 
 	inflight = page_pool_inflight(pool, false);
@@ -319,8 +324,6 @@ int page_pool_list(struct page_pool *pool)
 	if (pool->slow.netdev) {
 		hlist_add_head(&pool->user.list,
 			       &pool->slow.netdev->page_pools);
-		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
-
 		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
 	}
 



