On 27/04/2023 12.47, Jesper Dangaard Brouer wrote:
On 27/04/2023 02.57, Yunsheng Lin wrote:
On 2023/4/26 1:15, Jesper Dangaard Brouer wrote:
@@ -609,6 +609,8 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
recycle_stat_inc(pool, ring_full);
page_pool_return_page(pool, page);
}
+ if (pool->p.flags & PP_FLAG_SHUTDOWN)
+ page_pool_shutdown_attempt(pool);
It seems we have allowed page_pool_shutdown_attempt() to be called
concurrently here. Isn't there a time window between the
atomic_inc_return_relaxed() on pool->pages_state_release_cnt and the
read in page_pool_inflight(), which may cause page_pool_free() to be
called twice?
Yes, I think that is correct.
I actually woke up this morning thinking about this double-free case
and exactly this time window. Thanks for spotting and confirming the
issue.
Basically: two concurrent CPUs executing page_pool_shutdown_attempt()
can both end up seeing an inflight count of zero, and thus both kfree
the memory (in page_pool_free()), as each believes it is the last user
of the PP instance.
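To make the window concrete, here is a minimal sketch of the racy
pattern (simplified, with a made-up function name, not the actual
kernel code):

/* CPU A and CPU B concurrently return the two last pages: */
static void shutdown_attempt_racy(struct page_pool *pool)
{
	/* Both CPUs may have already done their
	 * atomic_inc_return_relaxed() on pages_state_release_cnt
	 * before either of them re-reads the counter here ...
	 */
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	s32 inflight = _distance(READ_ONCE(pool->pages_state_hold_cnt),
				 release_cnt);

	/* ... so both can compute inflight == 0 and both free */
	if (!inflight)
		page_pool_free(pool);	/* double free */
}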
I've been thinking about how to address this.
My current idea (sketched below):
(1) An atomic inc-and-test (or cmpxchg) on the release counter that
resolves the last-user race.
(2) Defer the free to a call_rcu callback to let other CPUs finish.
(3) Might need rcu_read_lock() in page_pool_shutdown_attempt().
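Roughly along these lines, as a hypothetical sketch of (1)+(2)
(page_pool_free_rcu and the rcu_head member don't exist today; they
are only here to illustrate the idea):

static void page_pool_free_rcu(struct rcu_head *head)
{
	struct page_pool *pool =
		container_of(head, struct page_pool, rcu_head);

	/* (2): the actual free happens after a grace period */
	page_pool_free(pool);
}

static void page_pool_shutdown_attempt(struct page_pool *pool)
{
	/* (1): atomic_inc_return() serializes the increments, so each
	 * CPU gets a unique release_cnt and at most one CPU can see
	 * the count that brings inflight to zero.
	 */
	u32 release_cnt = atomic_inc_return(&pool->pages_state_release_cnt);
	s32 inflight = _distance(READ_ONCE(pool->pages_state_hold_cnt),
				 release_cnt);

	if (!inflight)
		call_rcu(&pool->rcu_head, page_pool_free_rcu);
}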
I think I found a simpler approach (adjustment patch attached) that
avoids the race without any call_rcu callbacks.
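The trick: in page_pool_release_page() the release counter is bumped
with atomic_inc_return() (no longer the relaxed variant), and the
returned value is handed directly to the inflight check. Each CPU gets
a unique release_cnt, so at most one of them can observe the exact
count that brings inflight to zero, and only that CPU ends up calling
page_pool_free().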
Will post a V2.
--Jesper
fix race
From: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
---
net/core/page_pool.c | 48 ++++++++++++++++++++++++++++++++++--------------
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ce7e8dda6403..25139b162674 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -451,9 +451,8 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
*/
#define _distance(a, b) (s32)((a) - (b))
-static s32 page_pool_inflight(struct page_pool *pool)
+static s32 __page_pool_inflight(struct page_pool *pool, u32 release_cnt)
{
- u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
s32 inflight;
@@ -465,6 +464,14 @@ static s32 page_pool_inflight(struct page_pool *pool)
return inflight;
}
+static s32 page_pool_inflight(struct page_pool *pool)
+{
+ u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
+ return __page_pool_inflight(pool, release_cnt);
+}
+
+static int page_pool_free_attempt(struct page_pool *pool, u32 release_cnt);
+
/* Disconnects a page (from a page_pool). API users can have a need
* to disconnect a page (from a page_pool), to allow it to be used as
* a regular page (that will eventually be returned to the normal
@@ -473,7 +480,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
dma_addr_t dma;
- int count;
+ u32 count;
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
/* Always account for inflight pages, even if we didn't
@@ -490,8 +497,12 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
page_pool_clear_pp_info(page);
- count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
+ count = atomic_inc_return(&pool->pages_state_release_cnt);
trace_page_pool_state_release(pool, page, count);
+
+ /* In shutdown phase, last page will free pool instance */
+ if (pool->p.flags & PP_FLAG_SHUTDOWN)
+ page_pool_free_attempt(pool, count);
}
EXPORT_SYMBOL(page_pool_release_page);
@@ -543,7 +554,7 @@ static bool page_pool_recycle_in_cache(struct page *page,
return true;
}
-static void page_pool_shutdown_attempt(struct page_pool *pool);
+static void page_pool_empty_ring(struct page_pool *pool);
/* If the page refcnt == 1, this will try to recycle the page.
* if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
@@ -610,7 +621,7 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
page_pool_return_page(pool, page);
}
if (pool->p.flags & PP_FLAG_SHUTDOWN)
- page_pool_shutdown_attempt(pool);
+ page_pool_empty_ring(pool);
}
EXPORT_SYMBOL(page_pool_put_defragged_page);
@@ -660,7 +671,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
out:
if (pool->p.flags & PP_FLAG_SHUTDOWN)
- page_pool_shutdown_attempt(pool);
+ page_pool_empty_ring(pool);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
@@ -743,6 +754,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
}
EXPORT_SYMBOL(page_pool_alloc_frag);
+noinline
static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
@@ -802,22 +814,28 @@ static void page_pool_scrub(struct page_pool *pool)
page_pool_empty_ring(pool);
}
-static int page_pool_release(struct page_pool *pool)
+noinline
+static int page_pool_free_attempt(struct page_pool *pool, u32 release_cnt)
{
int inflight;
- page_pool_scrub(pool);
- inflight = page_pool_inflight(pool);
+ inflight = __page_pool_inflight(pool, release_cnt);
if (!inflight)
page_pool_free(pool);
return inflight;
}
-noinline
-static void page_pool_shutdown_attempt(struct page_pool *pool)
+static int page_pool_release(struct page_pool *pool)
{
- page_pool_release(pool);
+ int inflight;
+
+ page_pool_scrub(pool);
+ inflight = page_pool_inflight(pool);
+ if (!inflight)
+ page_pool_free(pool);
+
+ return inflight;
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
@@ -861,7 +879,9 @@ void page_pool_destroy(struct page_pool *pool)
* Enter into shutdown phase, and retry release to handle races.
*/
pool->p.flags |= PP_FLAG_SHUTDOWN;
- page_pool_shutdown_attempt(pool);
+
+ /* Concurrent CPUs could have returned last pages into ptr_ring */
+ page_pool_empty_ring(pool);
}
EXPORT_SYMBOL(page_pool_destroy);