From: Suren Baghdasaryan <surenb@xxxxxxxxxx>

commit b3bebe44306e23827397d0d774d206e3fa374041 upstream.

Outline and export free_reserved_page() because modules use it, and it
in turn uses page_ext_{get|put}, which should not be exported.  The same
result could be obtained by outlining {get|put}_page_tag_ref(), but that
would have a higher performance impact, as those functions are used in
more performance-critical paths.

Link: https://lkml.kernel.org/r/20240717212844.2749975-1-surenb@xxxxxxxxxx
Fixes: dcfe378c81f7 ("lib: introduce support for page allocation tagging")
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Reported-by: kernel test robot <lkp@xxxxxxxxx>
Closes: https://lore.kernel.org/oe-kbuild-all/202407080044.DWMC9N9I-lkp@xxxxxxxxx/
Suggested-by: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Sourav Panda <souravpanda@xxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx> [6.10]
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h | 16 +---------------
 mm/page_alloc.c    | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 15 deletions(-)

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3177,21 +3177,7 @@ extern void reserve_bootmem_region(phys_
 					phys_addr_t end, int nid);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
-	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
-
-		if (ref) {
-			set_codetag_empty(ref);
-			put_page_tag_ref(ref);
-		}
-	}
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
-	adjust_managed_page_count(page, 1);
-}
+void free_reserved_page(struct page *page);
 #define free_highmem_page(page) free_reserved_page(page)
 
 static inline void mark_page_reserved(struct page *page)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5809,6 +5809,23 @@ unsigned long free_reserved_area(void *s
 	return pages;
 }
 
+void free_reserved_page(struct page *page)
+{
+	if (mem_alloc_profiling_enabled()) {
+		union codetag_ref *ref = get_page_tag_ref(page);
+
+		if (ref) {
+			set_codetag_empty(ref);
+			put_page_tag_ref(ref);
+		}
+	}
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+	adjust_managed_page_count(page, 1);
+}
+EXPORT_SYMBOL(free_reserved_page);
+
 static int page_alloc_cpu_dead(unsigned int cpu)
 {
 	struct zone *zone;
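
For illustration, a minimal sketch of the kind of out-of-tree module code
that needs this export.  The module itself is hypothetical (names and flow
are invented); only free_reserved_page() and the other page APIs it calls
are taken from the kernel.  Before this patch, a module like this built
with CONFIG_MEM_ALLOC_PROFILING enabled failed to link, because the old
inline body of free_reserved_page() pulled in the unexported
page_ext_{get|put}:

	/* Hypothetical module: hands one reserved page back to the
	 * buddy allocator on unload via the newly exported
	 * free_reserved_page().
	 */
	#include <linux/module.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *demo_page;

	static int __init demo_init(void)
	{
		/* Stand-in for a driver that carves a page out of a
		 * reserved region: allocate one, mark it reserved, and
		 * remove it from the managed-page count. */
		demo_page = alloc_page(GFP_KERNEL);
		if (!demo_page)
			return -ENOMEM;
		SetPageReserved(demo_page);
		adjust_managed_page_count(demo_page, -1);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* Clears PageReserved, reinitializes the refcount, frees
		 * the page, and re-adds it to the managed-page count. */
		free_reserved_page(demo_page);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The trade-off named in the changelog is worth spelling out: outlining
{get|put}_page_tag_ref() instead would have kept free_reserved_page()
inline, but it would have pushed a function call into the hotter
allocation-tagging paths, whereas free_reserved_page() sits on a cold
path where the extra call is harmless.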