1. https://lore.kernel.org/all/caf31b5e-0e8f-4844-b7ba-ef59ed13b74e@xxxxxxx/
CC: Robin Murphy <robin.murphy@xxxxxxx>
CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
CC: IOMMU <iommu@xxxxxxxxxxxxxxx>
CC: MM <linux-mm@xxxxxxxxx>
Fixes: f71fec47c2df ("page_pool: make sure struct device is stable")
Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
---
net/core/page_pool.c | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 33a314abbba4..0bde7c6c781a 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -712,7 +712,8 @@ static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
 	rcu_read_unlock();
 }
 
-static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem,
+				       unsigned int dma_sync_size)
 {
 	int ret;
 	/* BH protection not needed if current is softirq */
@@ -723,10 +724,13 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
 
 	if (!ret) {
 		recycle_stat_inc(pool, ring);
-		return true;
+
+		rcu_read_lock();
+		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
+		rcu_read_unlock();
 	}
 
-	return false;
+	return !ret;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -779,10 +783,11 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
 	 */
 	if (likely(__page_pool_page_can_be_recycled(netmem))) {
 		/* Read barrier done in page_ref_count / READ_ONCE */
-		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
-
-		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
+		if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) {
+			page_pool_dma_sync_for_device(pool, netmem,
+						      dma_sync_size);
 			return 0;
+		}
 
 		/* Page found as candidate for recycling */
 		return netmem;
@@ -845,7 +850,7 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
 
 	netmem =
 		__page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
-	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
+	if (netmem && !page_pool_recycle_in_ring(pool, netmem, dma_sync_size)) {
 		/* Cache full, fallback to free pages */
 		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, netmem);
@@ -903,14 +908,18 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 
 	/* Bulk producer into ptr_ring page_pool cache */
 	in_softirq = page_pool_producer_lock(pool);
+	rcu_read_lock();
 	for (i = 0; i < bulk_len; i++) {
 		if (__ptr_ring_produce(&pool->ring, data[i])) {
 			/* ring full */
 			recycle_stat_inc(pool, ring_full);
 			break;
 		}
+		page_pool_dma_sync_for_device(pool, (__force netmem_ref)data[i],
+					      -1);
 	}
 	recycle_stat_add(pool, ring, i);
+	rcu_read_unlock();
 	page_pool_producer_unlock(pool, in_softirq);
 
 	/* Hopefully all pages was return into ptr_ring */
@@ -1200,6 +1209,8 @@ void page_pool_destroy(struct page_pool *pool)
 	if (!page_pool_release(pool))
 		return;
 
+	pool->dma_sync = false;
+
 	/* Paired with rcu lock in page_pool_napi_local() to enable clearing
 	 * of pool->p.napi in page_pool_disable_direct_recycling() is seen
 	 * before returning to driver to free the napi instance.