On 12/28/23 09:57, Matthew Wilcox (Oracle) wrote:
> Mirror the code in free_large_kmalloc() and alloc_pages_node()
> and use a folio directly.  Avoid the use of folio_alloc() as
> that will set up an rmappable folio which we do not want here.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>

> ---
>  mm/slub.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 58b4936f2a29..71d5840de65d 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3915,7 +3915,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
>   */
>  static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
>  {
> -	struct page *page;
> +	struct folio *folio;
>  	void *ptr = NULL;
>  	unsigned int order = get_order(size);
>
> @@ -3923,10 +3923,10 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
>  		flags = kmalloc_fix_flags(flags);
>
>  	flags |= __GFP_COMP;
> -	page = alloc_pages_node(node, flags, order);
> -	if (page) {
> -		ptr = page_address(page);
> -		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
> +	folio = (struct folio *)alloc_pages_node(node, flags, order);
> +	if (folio) {
> +		ptr = folio_address(folio);
> +		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
>  			      PAGE_SIZE << order);
>  	}
>
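
For readers following along, below is a minimal standalone sketch of the
pattern the patch adopts. It is not part of the patch; the helper name
large_alloc_sketch and its signature are invented for illustration. The
idea is to take a compound page straight from the page allocator and
treat it as a folio via a cast, so that folio_alloc()'s extra rmappable
setup (which the commit message says is unwanted here) never runs on
these slab allocations.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical illustration of the allocation pattern in the patch. */
static void *large_alloc_sketch(size_t size, gfp_t flags, int node)
{
	struct folio *folio;
	unsigned int order = get_order(size);

	/* Large kmalloc allocations must be compound pages (folios). */
	flags |= __GFP_COMP;

	/*
	 * Cast the returned page to a folio rather than calling
	 * folio_alloc(), which would additionally prepare the folio
	 * to be rmappable.
	 */
	folio = (struct folio *)alloc_pages_node(node, flags, order);
	if (!folio)
		return NULL;
	return folio_address(folio);
}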