On Fri, 2024-07-19 at 17:33 +0800, Yunsheng Lin wrote:
> Refactor common codes from __page_frag_alloc_va_align()
> to __page_frag_cache_refill(), so that the new API can
> make use of them.
>
> CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
> Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
> ---
>  include/linux/page_frag_cache.h |  2 +-
>  mm/page_frag_cache.c            | 93 +++++++++++++++++----------------
>  2 files changed, 49 insertions(+), 46 deletions(-)
>
> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> index 12a16f8e8ad0..5aa45de7a9a5 100644
> --- a/include/linux/page_frag_cache.h
> +++ b/include/linux/page_frag_cache.h
> @@ -50,7 +50,7 @@ static inline void *encoded_page_address(unsigned long encoded_va)
>
>  static inline void page_frag_cache_init(struct page_frag_cache *nc)
>  {
> -        nc->encoded_va = 0;
> +        memset(nc, 0, sizeof(*nc));
>  }
>

I do not like requiring the entire structure to be reset as part of
init. If encoded_va is 0 then we have already reset the page and the
flags. There shouldn't be anything else we need to reset, as remaining
and bias will be reset when we reallocate.

>  static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
> diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
> index 7928e5d50711..d9c9cad17af7 100644
> --- a/mm/page_frag_cache.c
> +++ b/mm/page_frag_cache.c
> @@ -19,6 +19,28 @@
>  #include <linux/page_frag_cache.h>
>  #include "internal.h"
>
> +static struct page *__page_frag_cache_recharge(struct page_frag_cache *nc)
> +{
> +        unsigned long encoded_va = nc->encoded_va;
> +        struct page *page;
> +
> +        page = virt_to_page((void *)encoded_va);
> +        if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> +                return NULL;
> +
> +        if (unlikely(encoded_page_pfmemalloc(encoded_va))) {
> +                VM_BUG_ON(compound_order(page) !=
> +                          encoded_page_order(encoded_va));
> +                free_unref_page(page, encoded_page_order(encoded_va));
> +                return NULL;
> +        }
> +
> +        /* OK, page count is 0, we can safely set it */
> +        set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
> +
> +        return page;
> +}
> +
>  static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>                                               gfp_t gfp_mask)
>  {
> @@ -26,6 +48,14 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>          struct page *page = NULL;
>          gfp_t gfp = gfp_mask;
>
> +        if (likely(nc->encoded_va)) {
> +                page = __page_frag_cache_recharge(nc);
> +                if (page) {
> +                        order = encoded_page_order(nc->encoded_va);
> +                        goto out;
> +                }
> +        }
> +

This code has no business here. This is refill; you just dropped
recharge into it, which makes a complete mess of the ordering and is
confusing to say the least.

The expectation was that if we are calling this function it is going
to overwrite the virtual address with NULL on failure, so we discard
the old page if there is one present. This changes that behaviour.
What you have effectively done is turn __page_frag_cache_refill into
the recharge function.

>  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
>          gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
>                     __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
> @@ -35,7 +65,7 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>          if (unlikely(!page)) {
>                  page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>                  if (unlikely(!page)) {
> -                        nc->encoded_va = 0;
> +                        memset(nc, 0, sizeof(*nc));
>                          return NULL;
>                  }
>

The memset will take a few more instructions than the existing code
did. I would prefer to keep this as-is if at all possible.
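As an untested sketch of what I mean by the single-field reset (this
is basically the pre-patch code, just with the reasoning spelled out
in a comment):

static inline void page_frag_cache_init(struct page_frag_cache *nc)
{
        /* Clearing encoded_va zeroes the page address, the order and
         * the pfmemalloc flag in one store; pagecnt_bias and
         * remaining are re-initialized on the next refill, so nothing
         * else in the structure needs touching here.
         */
        nc->encoded_va = 0;
}

and the same single store in the allocation failure path and in
page_frag_cache_drain() instead of the memset.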
> @@ -45,6 +75,16 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
>          nc->encoded_va = encode_aligned_va(page_address(page), order,
>                                             page_is_pfmemalloc(page));
>
> +        /* Even if we own the page, we do not use atomic_set().
> +         * This would break get_page_unless_zero() users.
> +         */
> +        page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
> +
> +out:
> +        /* reset page count bias and remaining to start of new frag */
> +        nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> +        nc->remaining = PAGE_SIZE << order;
> +
>          return page;
>  }
>

Why bother returning a page at all? It doesn't seem like you use it
anymore. From what I can tell, the use cases you have for it in patch
11/12 are all broken: you are exposing page as a variable when we
don't need to be passing internal details to the callers of the
function, and a simple error return code would do.

> @@ -55,7 +95,7 @@ void page_frag_cache_drain(struct page_frag_cache *nc)
>
>          __page_frag_cache_drain(virt_to_head_page((void *)nc->encoded_va),
>                                  nc->pagecnt_bias);
> -        nc->encoded_va = 0;
> +        memset(nc, 0, sizeof(*nc));
>  }
>  EXPORT_SYMBOL(page_frag_cache_drain);
>
> @@ -72,31 +112,9 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
>                                   unsigned int fragsz, gfp_t gfp_mask,
>                                   unsigned int align_mask)
>  {
> -        unsigned long encoded_va = nc->encoded_va;
> -        unsigned int size, remaining;
> -        struct page *page;
> -
> -        if (unlikely(!encoded_va)) {
> -refill:
> -                page = __page_frag_cache_refill(nc, gfp_mask);
> -                if (!page)
> -                        return NULL;
> -
> -                encoded_va = nc->encoded_va;
> -                size = page_frag_cache_page_size(encoded_va);
> +        unsigned int size = page_frag_cache_page_size(nc->encoded_va);
> +        unsigned int remaining = nc->remaining & align_mask;
>
> -                /* Even if we own the page, we do not use atomic_set().
> -                 * This would break get_page_unless_zero() users.
> -                 */
> -                page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
> -
> -                /* reset page count bias and remaining to start of new frag */
> -                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> -                nc->remaining = size;
> -        }
> -
> -        size = page_frag_cache_page_size(encoded_va);
> -        remaining = nc->remaining & align_mask;
>          if (unlikely(remaining < fragsz)) {

I am not a fan of adding a dependency on remaining being set *before*
encoded_va; the fact is that setting it relies on the size. In
addition, this creates a big blob of code that the conditional paths
have to jump over. I think it is much better to first validate
encoded_va and then validate remaining. Otherwise, checking only
remaining seems problematic and like a recipe for NULL pointer
accesses.
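Untested, but roughly the ordering I have in mind, with refill
returning a simple error code rather than the page (the int return is
my suggestion, not what your patch does):

        unsigned long encoded_va = nc->encoded_va;
        unsigned int size, remaining;

        /* Validate that we actually hold a page before trusting
         * remaining; remaining is only meaningful once encoded_va is
         * non-zero.
         */
        if (unlikely(!encoded_va)) {
                if (__page_frag_cache_refill(nc, gfp_mask))
                        return NULL;
                encoded_va = nc->encoded_va;
        }

        size = page_frag_cache_page_size(encoded_va);
        remaining = nc->remaining & align_mask;

with the remaining < fragsz check only after that.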
>                  if (unlikely(fragsz > PAGE_SIZE)) {
>                          /*
> @@ -111,32 +129,17 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
>                          return NULL;
>                  }
>
> -                page = virt_to_page((void *)encoded_va);
> -
> -                if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
> -                        goto refill;
> -
> -                if (unlikely(encoded_page_pfmemalloc(encoded_va))) {
> -                        VM_BUG_ON(compound_order(page) !=
> -                                  encoded_page_order(encoded_va));
> -                        free_unref_page(page, encoded_page_order(encoded_va));
> -                        goto refill;
> -                }
> -
> -                /* OK, page count is 0, we can safely set it */
> -                set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
> -
> -                /* reset page count bias and remaining to start of new frag */
> -                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
> -                nc->remaining = size;
> +                if (unlikely(!__page_frag_cache_refill(nc, gfp_mask)))
> +                        return NULL;
>
> +                size = page_frag_cache_page_size(nc->encoded_va);

So this adds yet another set/read of size to the recharge path.
Previously the recharge path could just reuse the existing size; see
the sketch at the end of this mail.

>                  remaining = size;
>          }
>
>          nc->pagecnt_bias--;
>          nc->remaining = remaining - fragsz;
>
> -        return encoded_page_address(encoded_va) + (size - remaining);
> +        return encoded_page_address(nc->encoded_va) + (size - remaining);
>  }
>  EXPORT_SYMBOL(__page_frag_alloc_va_align);
>
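Untested sketch of the direction I would take for the recharge path,
keeping recharge separate from refill (assuming both helpers return 0
on success, which is again my suggestion, not what the patch does):

        if (__page_frag_cache_recharge(nc)) {
                /* recharge failed (other references held, or
                 * pfmemalloc page), so grab a whole new page
                 */
                if (__page_frag_cache_refill(nc, gfp_mask))
                        return NULL;

                /* only a full refill can change the page order, so
                 * this is the one place size needs re-reading
                 */
                size = page_frag_cache_page_size(nc->encoded_va);
        }

        /* on the recharge path the existing size is simply reused */
        remaining = size;

with the pagecnt_bias/remaining resets then staying in the caller, as
in the pre-patch code.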