On Thu, Jan 05, 2023 at 09:46:12PM +0000, Matthew Wilcox (Oracle) wrote:
> Convert __page_pool_alloc_page_order() and __page_pool_alloc_pages_slow()
> to use netmem internally. This removes a couple of calls
> to compound_head() that are hidden inside put_page().
> Convert trace_page_pool_state_hold(), page_pool_dma_map() and
> page_pool_set_pp_info() to take a netmem argument.
>
> Saves 83 bytes of text in __page_pool_alloc_page_order() and 98 in
> __page_pool_alloc_pages_slow() for a total of 181 bytes.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
>  include/trace/events/page_pool.h | 14 +++++------
>  net/core/page_pool.c             | 42 +++++++++++++++++---------------
>  2 files changed, 29 insertions(+), 27 deletions(-)
>
> diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
> index 113aad0c9e5b..d1237a7ce481 100644
> --- a/include/trace/events/page_pool.h
> +++ b/include/trace/events/page_pool.h
> @@ -67,26 +67,26 @@ TRACE_EVENT(page_pool_state_release,
>  TRACE_EVENT(page_pool_state_hold,
>
>  	TP_PROTO(const struct page_pool *pool,
> -		 const struct page *page, u32 hold),
> +		 const struct netmem *nmem, u32 hold),
>
> -	TP_ARGS(pool, page, hold),
> +	TP_ARGS(pool, nmem, hold),
>
>  	TP_STRUCT__entry(
>  		__field(const struct page_pool *, pool)
> -		__field(const struct page *, page)
> +		__field(const struct netmem *, nmem)
>  		__field(u32, hold)
>  		__field(unsigned long, pfn)
>  	),
>
>  	TP_fast_assign(
>  		__entry->pool = pool;
> -		__entry->page = page;
> +		__entry->nmem = nmem;
>  		__entry->hold = hold;
> -		__entry->pfn = page_to_pfn(page);
> +		__entry->pfn = netmem_pfn(nmem);
>  	),
>
> -	TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
> -		  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
> +	TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
> +		  __entry->pool, __entry->nmem, __entry->pfn, __entry->hold)
>  );
>
>  TRACE_EVENT(page_pool_update_nid,
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 437241aba5a7..4e985502c569 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -304,8 +304,9 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
>  					 pool->p.dma_dir);
>  }
>
> -static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> +static bool page_pool_dma_map(struct page_pool *pool, struct netmem *nmem)
>  {
> +	struct page *page = netmem_page(nmem);
>  	dma_addr_t dma;
>
>  	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
> @@ -328,12 +329,12 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>  }
>
>  static void page_pool_set_pp_info(struct page_pool *pool,
> -				  struct page *page)
> +				  struct netmem *nmem)
>  {
> -	page->pp = pool;
> -	page->pp_magic |= PP_SIGNATURE;
> +	nmem->pp = pool;
> +	nmem->pp_magic |= PP_SIGNATURE;
>  	if (pool->p.init_callback)
> -		pool->p.init_callback(page, pool->p.init_arg);
> +		pool->p.init_callback(netmem_page(nmem), pool->p.init_arg);
>  }
>
>  static void page_pool_clear_pp_info(struct netmem *nmem)
> @@ -345,26 +346,26 @@ static void page_pool_clear_pp_info(struct netmem *nmem)
>  static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
>  						 gfp_t gfp)
>  {
> -	struct page *page;
> +	struct netmem *nmem;
>
>  	gfp |= __GFP_COMP;
> -	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
> -	if (unlikely(!page))
> +	nmem = page_netmem(alloc_pages_node(pool->p.nid, gfp, pool->p.order));
> +	if (unlikely(!nmem))
>  		return NULL;
>
>  	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
> -	    unlikely(!page_pool_dma_map(pool, page))) {
> -		put_page(page);
> +	    unlikely(!page_pool_dma_map(pool, nmem))) {
> +		netmem_put(nmem);
>  		return NULL;
>  	}
>
>  	alloc_stat_inc(pool, slow_high_order);
> -	page_pool_set_pp_info(pool, page);
> +	page_pool_set_pp_info(pool, nmem);
>
>  	/* Track how many pages are held 'in-flight' */
>  	pool->pages_state_hold_cnt++;
> -	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
> -	return page;
> +	trace_page_pool_state_hold(pool, nmem, pool->pages_state_hold_cnt);
> +	return netmem_page(nmem);
>  }
>
>  /* slow path */
> @@ -398,18 +399,18 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  	 * page element have not been (possibly) DMA mapped.
>  	 */
>  	for (i = 0; i < nr_pages; i++) {
> -		page = pool->alloc.cache[i];
> +		struct netmem *nmem = page_netmem(pool->alloc.cache[i]);
>  		if ((pp_flags & PP_FLAG_DMA_MAP) &&
> -		    unlikely(!page_pool_dma_map(pool, page))) {
> -			put_page(page);
> +		    unlikely(!page_pool_dma_map(pool, nmem))) {
> +			netmem_put(nmem);
>  			continue;
>  		}
>
> -		page_pool_set_pp_info(pool, page);
> -		pool->alloc.cache[pool->alloc.count++] = page;
> +		page_pool_set_pp_info(pool, nmem);
> +		pool->alloc.cache[pool->alloc.count++] = netmem_page(nmem);
>  		/* Track how many pages are held 'in-flight' */
>  		pool->pages_state_hold_cnt++;
> -		trace_page_pool_state_hold(pool, page,
> +		trace_page_pool_state_hold(pool, nmem,
>  					   pool->pages_state_hold_cnt);
>  	}
>
> @@ -421,7 +422,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>  		page = NULL;
>  	}
>
> -	/* When page just alloc'ed is should/must have refcnt 1. */
> +	/* When page just allocated it should have refcnt 1 (but may have
> +	 * speculative references) */
>  	return page;
>  }
>
> --
> 2.35.1
>

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@xxxxxxxxxx>
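
[Editorial note, not part of the thread] The text savings cited in the commit message come from put_page() having to resolve a possible tail page via compound_head() before it can drop a reference, whereas a netmem by construction already refers to the head page. The sketch below shows the kind of thin wrappers this patch leans on; the names page_netmem(), netmem_page() and netmem_put() appear in the patch, but the bodies here are illustrative assumptions, not the actual definitions from earlier patches in the series.

	/* Illustrative sketch only -- assumed shape of the netmem helpers
	 * used by this patch; the real definitions come from earlier
	 * patches in the series.
	 */
	static inline struct netmem *page_netmem(struct page *page)
	{
		/* page_pool never hands out tail pages, so a direct cast
		 * is assumed to be safe here.
		 */
		return (struct netmem *)page;
	}

	static inline struct page *netmem_page(struct netmem *nmem)
	{
		return (struct page *)nmem;
	}

	static inline void netmem_put(struct netmem *nmem)
	{
		/* Unlike put_page(), no compound_head() lookup is needed:
		 * nmem is already the head page, so the refcount can be
		 * dropped through the folio interface directly.
		 */
		folio_put((struct folio *)nmem);
	}

Under that assumption, each put_page() replaced by netmem_put() in the two error paths drops a hidden compound_head() test-and-branch, which is consistent with the 83 and 98 byte reductions the commit message reports.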