On Mon, Mar 03, 2025 at 05:28:05PM +0000, Matthew Wilcox (Oracle) wrote:
> If a user calls p = kmalloc(1024); kfree(p); kfree(p); and 'p' was the
> only object in the slab, we may free the slab after the first call to
> kfree(). If we do, we clear PGTY_slab and the second call to kfree()
> will call free_large_kmalloc(). That will leave a trace in the logs
> ("object pointer: 0x%p"), but otherwise proceed to free the memory,
> which is likely to corrupt the page allocator's metadata.
>
> Allocate a new page type for large kmalloc and mark the memory with it
> while it's allocated. That lets us detect this double-free and return
> without harming any data structures.
>
> Reported-by: Hannes Reinecke <hare@xxxxxxxx>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---

Looks good to me,
Reviewed-by: Harry Yoo <harry.yoo@xxxxxxxxxx>

--
Cheers,
Harry

>  include/linux/page-flags.h | 18 ++++++++++--------
>  mm/slub.c                  |  7 +++++++
>  2 files changed, 17 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 36d283552f80..df9234e5f478 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -925,14 +925,15 @@ FOLIO_FLAG_FALSE(has_hwpoisoned)
>  enum pagetype {
>  	/* 0x00-0x7f are positive numbers, ie mapcount */
>  	/* Reserve 0x80-0xef for mapcount overflow. */
> -	PGTY_buddy	= 0xf0,
> -	PGTY_offline	= 0xf1,
> -	PGTY_table	= 0xf2,
> -	PGTY_guard	= 0xf3,
> -	PGTY_hugetlb	= 0xf4,
> -	PGTY_slab	= 0xf5,
> -	PGTY_zsmalloc	= 0xf6,
> -	PGTY_unaccepted	= 0xf7,
> +	PGTY_buddy		= 0xf0,
> +	PGTY_offline		= 0xf1,
> +	PGTY_table		= 0xf2,
> +	PGTY_guard		= 0xf3,
> +	PGTY_hugetlb		= 0xf4,
> +	PGTY_slab		= 0xf5,
> +	PGTY_zsmalloc		= 0xf6,
> +	PGTY_unaccepted		= 0xf7,
> +	PGTY_large_kmalloc	= 0xf8,
>
>  	PGTY_mapcount_underflow = 0xff
>  };
> @@ -1075,6 +1076,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
>   * Serialized with zone lock.
>   */
>  PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
> +FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
>
>  /**
>   * PageHuge - Determine if the page belongs to hugetlbfs
> diff --git a/mm/slub.c b/mm/slub.c
> index 1f50129dcfb3..872e1bab3bd1 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4241,6 +4241,7 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
>  		ptr = folio_address(folio);
>  		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
>  				      PAGE_SIZE << order);
> +		__folio_set_large_kmalloc(folio);
>  	}
>
>  	ptr = kasan_kmalloc_large(ptr, size, flags);
> @@ -4716,6 +4717,11 @@ static void free_large_kmalloc(struct folio *folio, void *object)
>  {
>  	unsigned int order = folio_order(folio);
>
> +	if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
> +		dump_page(&folio->page, "Not a kmalloc allocation");
> +		return;
> +	}
> +
>  	if (WARN_ON_ONCE(order == 0))
>  		pr_warn_once("object pointer: 0x%p\n", object);
>
> @@ -4725,6 +4731,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
>
>  	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
>  			      -(PAGE_SIZE << order));
> +	__folio_clear_large_kmalloc(folio);
>  	folio_put(folio);
>  }
>
> --
> 2.47.2
>
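For anyone who wants to see the failure mode concretely, below is a minimal
sketch of the sequence the new page type catches. It is not part of the
patch, and the module/function names are hypothetical; it just replays the
scenario from the commit message in test-module form:

/* double_free_demo.c - hypothetical illustration, not part of the patch */
#include <linux/module.h>
#include <linux/slab.h>

static int __init double_free_demo_init(void)
{
	/*
	 * Assume 'p' ends up the only object in its slab, so the first
	 * kfree() also frees the slab page and clears PGTY_slab.
	 */
	void *p = kmalloc(1024, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	kfree(p);
	/*
	 * The folio no longer carries PGTY_slab, so this second kfree()
	 * falls through to free_large_kmalloc(). Before this patch it
	 * freed the page again, likely corrupting page allocator
	 * metadata; with PGTY_large_kmalloc it trips the new
	 * WARN_ON_ONCE(), dumps the page, and returns without touching
	 * any data structures.
	 */
	kfree(p);

	return 0;
}
module_init(double_free_demo_init);

MODULE_LICENSE("GPL");

The same check also covers double-frees of genuinely large allocations
(ones that bypass the slab caches), since those folios now carry
PGTY_large_kmalloc from ___kmalloc_large_node() until free_large_kmalloc()
clears it.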