On Mon, 30 May 2022 12:27:05 -0700 Jakub Kicinski wrote:
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index e008a3df0485..360a545ee5e8 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5537,6 +5537,7 @@ EXPORT_SYMBOL(free_pages);
>   * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
>   */
>  static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
> +					     unsigned int fragsz,
>  					     gfp_t gfp_mask)
>  {
>  	struct page *page = NULL;
> @@ -5549,7 +5550,7 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>  				PAGE_FRAG_CACHE_MAX_ORDER);
>  	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
>  #endif
> -	if (unlikely(!page))
> +	if (unlikely(!page && fragsz <= PAGE_SIZE))
>  		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>  
>  	nc->va = page ? page_address(page) : NULL;
> @@ -5576,7 +5577,7 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
>  
>  	if (unlikely(!nc->va)) {
>  refill:
> -		page = __page_frag_cache_refill(nc, gfp_mask);
> +		page = __page_frag_cache_refill(nc, fragsz, gfp_mask);
>  		if (!page)
>  			return NULL;

Oh, well, the reuse path also needs an update. We can slap a similar condition next to the pfmemalloc check.
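
Something along these lines, maybe (completely untested sketch; the surrounding
lines are paraphrased from my reading of the current reuse branch in
page_frag_alloc_align(), and whether we return NULL or goto refill there is
debatable):

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(nc->pfmemalloc)) {
			free_the_page(page, compound_order(page));
			goto refill;
		}

		/* ... page count / bias reset as before ... */

		offset = size - fragsz;
		/*
		 * The recycled page may be a plain order-0 page (the
		 * high-order refill can have failed when it was first
		 * allocated), so it may still be too small for fragsz.
		 * Bail out instead of handing back an out-of-bounds
		 * offset.
		 */
		if (unlikely(offset < 0))
			return NULL;
	}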