On Tue, 2024-06-25 at 21:52 +0800, Yunsheng Lin wrote: > There are many use cases that need minimum memory in order > for forward progress, but more performant if more memory is > available or need to probe the cache info to use any memory > available for frag coalescing reason. > > Currently skb_page_frag_refill() API is used to solve the > above use cases, but caller needs to know about the internal > detail and access the data field of 'struct page_frag' to > meet the requirement of the above use cases and its > implementation is similar to the one in mm subsystem. > > To unify those two page_frag implementations, introduce a > prepare API to ensure minimum memory is satisfied and return > how much the actual memory is available to the caller and a > probe API to report the current available memory to caller > without doing cache refilling. The caller needs to either call > the commit API to report how much memory it actually uses, or > not do so if deciding to not use any memory. > > As next patch is about to replace 'struct page_frag' with > 'struct page_frag_cache' in linux/sched.h, which is included > by the asm-offsets.s, using the virt_to_page() in the inline > helper of page_frag_cache.h causes a "'vmemmap' undeclared" > compiling error for asm-offsets.s, use a macro for probe API > to avoid that compiling error. 
> > CC: Alexander Duyck <alexander.duyck@xxxxxxxxx> > Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx> > --- > include/linux/page_frag_cache.h | 82 +++++++++++++++++++++++ > mm/page_frag_cache.c | 114 ++++++++++++++++++++++++++++++++ > 2 files changed, 196 insertions(+) > > diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h > index b33904d4494f..e95d44a36ec9 100644 > --- a/include/linux/page_frag_cache.h > +++ b/include/linux/page_frag_cache.h > @@ -4,6 +4,7 @@ > #define _LINUX_PAGE_FRAG_CACHE_H > > #include <linux/gfp_types.h> > +#include <linux/mmdebug.h> > > #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) > #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) > @@ -87,6 +88,9 @@ static inline unsigned int page_frag_cache_page_size(struct encoded_va *encoded_ > > void page_frag_cache_drain(struct page_frag_cache *nc); > void __page_frag_cache_drain(struct page *page, unsigned int count); > +struct page *page_frag_alloc_pg(struct page_frag_cache *nc, > + unsigned int *offset, unsigned int fragsz, > + gfp_t gfp); > void *__page_frag_alloc_va_align(struct page_frag_cache *nc, > unsigned int fragsz, gfp_t gfp_mask, > unsigned int align_mask); > @@ -99,12 +103,90 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc, > return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, -align); > } > > +static inline unsigned int page_frag_cache_page_offset(const struct page_frag_cache *nc) > +{ > + return page_frag_cache_page_size(nc->encoded_va) - nc->remaining; > +} > + > static inline void *page_frag_alloc_va(struct page_frag_cache *nc, > unsigned int fragsz, gfp_t gfp_mask) > { > return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, ~0u); > } > > +void *page_frag_alloc_va_prepare(struct page_frag_cache *nc, unsigned int *fragsz, > + gfp_t gfp); > + > +static inline void *page_frag_alloc_va_prepare_align(struct page_frag_cache *nc, > + unsigned int *fragsz, > + gfp_t gfp, > + unsigned int 
align) > +{ > + WARN_ON_ONCE(!is_power_of_2(align) || align > PAGE_SIZE); > + nc->remaining = nc->remaining & -align; > + return page_frag_alloc_va_prepare(nc, fragsz, gfp); > +} > + > +struct page *page_frag_alloc_pg_prepare(struct page_frag_cache *nc, > + unsigned int *offset, > + unsigned int *fragsz, gfp_t gfp); > + > +struct page *page_frag_alloc_prepare(struct page_frag_cache *nc, > + unsigned int *offset, > + unsigned int *fragsz, > + void **va, gfp_t gfp); > + > +static inline struct encoded_va *__page_frag_alloc_probe(struct page_frag_cache *nc, > + unsigned int *offset, > + unsigned int *fragsz, > + void **va) > +{ > + struct encoded_va *encoded_va; > + > + *fragsz = nc->remaining; > + encoded_va = nc->encoded_va; > + *offset = page_frag_cache_page_size(encoded_va) - *fragsz; > + *va = encoded_page_address(encoded_va) + *offset; > + > + return encoded_va; > +} > + > +#define page_frag_alloc_probe(nc, offset, fragsz, va) \ > +({ \ > + struct page *__page = NULL; \ > + \ > + VM_BUG_ON(!*(fragsz)); \ > + if (likely((nc)->remaining >= *(fragsz))) \ > + __page = virt_to_page(__page_frag_alloc_probe(nc, \ > + offset, \ > + fragsz, \ > + va)); \ > + \ > + __page; \ > +}) > + Why is this a macro instead of just being an inline? Are you trying to avoid having to include a header due to the virt_to_page?