To avoid debug warnings when freeing reserved pages that were not allocated
through the usual allocators, mark their codetags as empty before freeing.

Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
 include/linux/alloc_tag.h   | 2 ++
 include/linux/mm.h          | 8 ++++++++
 include/linux/pgalloc_tag.h | 2 ++
 mm/mm_init.c                | 9 +++++++++
 4 files changed, 21 insertions(+)

diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 1f3207097b03..102caf62c2a9 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -95,6 +95,7 @@ static inline void set_codetag_empty(union codetag_ref *ref)
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
+static inline void set_codetag_empty(union codetag_ref *ref) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
@@ -155,6 +156,7 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
 static inline void alloc_tag_sub_noalloc(union codetag_ref *ref, size_t bytes) {}
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
 				 size_t bytes) {}
+static inline void set_codetag_empty(union codetag_ref *ref) {}
 
 #endif
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f5a97dec5169..ac1b661987ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 #include <linux/mmdebug.h>
 #include <linux/gfp.h>
+#include <linux/pgalloc_tag.h>
 #include <linux/bug.h>
 #include <linux/list.h>
 #include <linux/mmzone.h>
@@ -3112,6 +3113,13 @@ extern void reserve_bootmem_region(phys_addr_t start,
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void free_reserved_page(struct page *page)
 {
+	union codetag_ref *ref;
+
+	ref = get_page_tag_ref(page);
+	if (ref) {
+		set_codetag_empty(ref);
+		put_page_tag_ref(ref);
+	}
 	ClearPageReserved(page);
 	init_page_count(page);
 	__free_page(page);
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 0174aff5e871..ae9b0f359264 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -93,6 +93,8 @@ static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
 
 #else /* CONFIG_MEM_ALLOC_PROFILING */
 
+static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; }
+static inline void put_page_tag_ref(union codetag_ref *ref) {}
 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int order) {}
 static inline void pgalloc_tag_sub(struct page *page, unsigned int order) {}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index e9ea2919d02d..f5386632fe86 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2566,6 +2566,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 void __init memblock_free_pages(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
+	union codetag_ref *ref;
 
 	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
 		int nid = early_pfn_to_nid(pfn);
@@ -2578,6 +2579,14 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 		/* KMSAN will take care of these pages. */
 		return;
 	}
+
+	/* pages were reserved and not allocated */
+	ref = get_page_tag_ref(page);
+	if (ref) {
+		set_codetag_empty(ref);
+		put_page_tag_ref(ref);
+	}
+
 	__free_pages_core(page, order);
 }
 
-- 
2.43.0.687.g38aa6559b0-goog
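
Editor's note (not part of the patch above): the same
get_page_tag_ref() / set_codetag_empty() / put_page_tag_ref() sequence now
appears verbatim in both free_reserved_page() and memblock_free_pages(). If
more callers turn up, it could be factored into a small helper. A minimal
sketch follows, assuming it would sit next to the other stubs in
include/linux/pgalloc_tag.h; the name pgalloc_tag_clear() is illustrative
only and is not an API introduced by this patch.

/*
 * Illustrative helper only -- not part of this patch. Marks the page's
 * allocation codetag as empty so that freeing a page which was reserved
 * (and therefore never accounted by the page allocator) does not trigger
 * CONFIG_MEM_ALLOC_PROFILING_DEBUG warnings.
 */
static inline void pgalloc_tag_clear(struct page *page)
{
	union codetag_ref *ref = get_page_tag_ref(page);

	if (ref) {
		set_codetag_empty(ref);
		put_page_tag_ref(ref);
	}
}

With such a helper, both call sites would reduce to a single
pgalloc_tag_clear(page) call, and the !CONFIG_MEM_ALLOC_PROFILING stubs added
in pgalloc_tag.h would let it compile away entirely when profiling is
disabled.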