On 29/06/2021 15.48, Chuck Lever wrote:
The call site in __page_pool_alloc_pages_slow() also seems to be
confused on this matter. It should be attended to by someone who
is familiar with that code.
I don't think we need a fix for __page_pool_alloc_pages_slow(), as the
array is guaranteed to be empty.
But a fix would look like this:
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index c137ce308c27..1b04538a3da3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -245,22 +245,23 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
if (unlikely(pp_order))
return __page_pool_alloc_page_order(pool, gfp);
/* Unnecessary as alloc cache is empty, but guarantees zero count */
- if (unlikely(pool->alloc.count > 0))
+ if (unlikely(pool->alloc.count > 0)) // ^^^^^^^^^^^^^^^^^^^^^^
return pool->alloc.cache[--pool->alloc.count];
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
+ /* bulk API return value also counts existing pages, but the array is empty */
nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
if (unlikely(!nr_pages))
return NULL;
/* Pages have been filled into alloc.cache array, but count is zero and
* page elements have not been (possibly) DMA mapped.
*/
- for (i = 0; i < nr_pages; i++) {
+ for (i = pool->alloc.count; i < nr_pages; i++) {
page = pool->alloc.cache[i];
if ((pp_flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);