The patch titled
     Subject: lib: introduce support for page allocation tagging
has been added to the -mm mm-unstable branch.  Its filename is
     lib-introduce-support-for-page-allocation-tagging.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/lib-introduce-support-for-page-allocation-tagging.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Subject: lib: introduce support for page allocation tagging
Date: Thu, 21 Mar 2024 09:36:36 -0700

Introduce helper functions to easily instrument page allocators by storing
a pointer to the allocation tag associated with the code that allocated
the page in a page_ext field.
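
As an illustrative sketch only (this snippet is not part of the patch, and
the example_* wrappers are hypothetical), the intended calling pattern on
the allocator side looks roughly like the following, using the helpers
introduced below and assuming CONFIG_MEM_ALLOC_PROFILING=y:

  #include <linux/mm_types.h>
  #include <linux/sched.h>
  #include <linux/pgalloc_tag.h>

  /* hypothetical wrapper; allocation side, cf. the post_alloc_hook() hunk */
  static void example_charge_pages(struct page *page, unsigned int order)
  {
          /* charge 1 << order pages to the tag of the allocating code */
          pgalloc_tag_add(page, current, 1 << order);
  }

  /* hypothetical wrapper; free side, cf. the free_pages_prepare() hunks */
  static void example_uncharge_pages(struct page *page, unsigned int order)
  {
          /* release the same number of pages from the owning tag */
          pgalloc_tag_sub(page, 1 << order);
  }

Both helpers are no-ops when profiling is compiled out or disabled at
runtime; the accounted size is PAGE_SIZE * nr, so e.g. an order-3
allocation charges 8 * PAGE_SIZE bytes to its tag until the pages are
freed.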
Link: https://lkml.kernel.org/r/20240321163705.3067592-15-surenb@xxxxxxxxxx
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Co-developed-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Alex Gaynor <alex.gaynor@xxxxxxxxx>
Cc: Alice Ryhl <aliceryhl@xxxxxxxxxx>
Cc: Andreas Hindborg <a.hindborg@xxxxxxxxxxx>
Cc: Benno Lossin <benno.lossin@xxxxxxxxx>
Cc: "Björn Roy Baron" <bjorn3_gh@xxxxxxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Dennis Zhou <dennis@xxxxxxxxxx>
Cc: Gary Guo <gary@xxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Miguel Ojeda <ojeda@xxxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Wedson Almeida Filho <wedsonaf@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/page_ext.h    |    1 
 include/linux/pgalloc_tag.h |   78 ++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug           |    1 
 lib/alloc_tag.c             |   17 +++++++
 mm/mm_init.c                |    1 
 mm/page_alloc.c             |    4 +
 mm/page_ext.c               |    4 +
 7 files changed, 105 insertions(+), 1 deletion(-)

--- a/include/linux/page_ext.h~lib-introduce-support-for-page-allocation-tagging
+++ a/include/linux/page_ext.h
@@ -4,7 +4,6 @@
 
 #include <linux/types.h>
 #include <linux/stacktrace.h>
-#include <linux/stackdepot.h>
 
 struct pglist_data;
 
--- /dev/null
+++ a/include/linux/pgalloc_tag.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * page allocation tagging
+ */
+#ifndef _LINUX_PGALLOC_TAG_H
+#define _LINUX_PGALLOC_TAG_H
+
+#include <linux/alloc_tag.h>
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+#include <linux/page_ext.h>
+
+extern struct page_ext_operations page_alloc_tagging_ops;
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
+
+static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext)
+{
+	return (void *)page_ext + page_alloc_tagging_ops.offset;
+}
+
+static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref)
+{
+	return (void *)ref - page_alloc_tagging_ops.offset;
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline union codetag_ref *get_page_tag_ref(struct page *page)
+{
+	if (page) {
+		struct page_ext *page_ext = page_ext_get(page);
+
+		if (page_ext)
+			return codetag_ref_from_page_ext(page_ext);
+	}
+	return NULL;
+}
+
+static inline void put_page_tag_ref(union codetag_ref *ref)
+{
+	page_ext_put(page_ext_from_codetag_ref(ref));
+}
+
+static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
+				   unsigned int nr)
+{
+	if (mem_alloc_profiling_enabled()) {
+		union codetag_ref *ref = get_page_tag_ref(page);
+
+		if (ref) {
+			alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr);
+			put_page_tag_ref(ref);
+		}
+	}
+}
+
+static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
+{
+	if (mem_alloc_profiling_enabled()) {
+		union codetag_ref *ref = get_page_tag_ref(page);
+
+		if (ref) {
+			alloc_tag_sub(ref, PAGE_SIZE * nr);
+			put_page_tag_ref(ref);
+		}
+	}
+}
+
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
+				   unsigned int nr) {}
+static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
+#endif /* _LINUX_PGALLOC_TAG_H */
--- a/lib/alloc_tag.c~lib-introduce-support-for-page-allocation-tagging
+++ a/lib/alloc_tag.c
@@ -3,6 +3,7 @@
 #include <linux/fs.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/page_ext.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_buf.h>
 #include <linux/seq_file.h>
@@ -115,6 +116,22 @@ static bool alloc_tag_module_unload(stru
 	return module_unused;
 }
 
+static __init bool need_page_alloc_tagging(void)
+{
+	return true;
+}
+
+static __init void init_page_alloc_tagging(void)
+{
+}
+
+struct page_ext_operations page_alloc_tagging_ops = {
+	.size = sizeof(union codetag_ref),
+	.need = need_page_alloc_tagging,
+	.init = init_page_alloc_tagging,
+};
+EXPORT_SYMBOL(page_alloc_tagging_ops);
+
 static struct ctl_table memory_allocation_profiling_sysctls[] = {
 	{
 		.procname = "mem_profiling",
--- a/lib/Kconfig.debug~lib-introduce-support-for-page-allocation-tagging
+++ a/lib/Kconfig.debug
@@ -978,6 +978,7 @@ config MEM_ALLOC_PROFILING
 	depends on PROC_FS
 	depends on !DEBUG_FORCE_WEAK_PER_CPU
 	select CODE_TAGGING
+	select PAGE_EXTENSION
 	help
 	  Track allocation source code and record total allocation size
 	  initiated at that code location. The mechanism can be used to track
--- a/mm/mm_init.c~lib-introduce-support-for-page-allocation-tagging
+++ a/mm/mm_init.c
@@ -24,6 +24,7 @@
 #include <linux/page_ext.h>
 #include <linux/pti.h>
 #include <linux/pgtable.h>
+#include <linux/stackdepot.h>
 #include <linux/swap.h>
 #include <linux/cma.h>
 #include <linux/crash_dump.h>
--- a/mm/page_alloc.c~lib-introduce-support-for-page-allocation-tagging
+++ a/mm/page_alloc.c
@@ -54,6 +54,7 @@
 #include <linux/khugepaged.h>
 #include <linux/delayacct.h>
 #include <linux/cacheinfo.h>
+#include <linux/pgalloc_tag.h>
 #include <asm/div64.h>
 #include "internal.h"
 #include "shuffle.h"
@@ -1101,6 +1102,7 @@ __always_inline bool free_pages_prepare(
 		/* Do not let hwpoison pages hit pcplists/buddy */
 		reset_page_owner(page, order);
 		page_table_check_free(page, order);
+		pgalloc_tag_sub(page, 1 << order);
 		return false;
 	}
 
@@ -1140,6 +1142,7 @@ __always_inline bool free_pages_prepare(
 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	reset_page_owner(page, order);
 	page_table_check_free(page, order);
+	pgalloc_tag_sub(page, 1 << order);
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),
@@ -1533,6 +1536,7 @@ inline void post_alloc_hook(struct page
 
 	set_page_owner(page, order, gfp_flags);
 	page_table_check_alloc(page, order);
+	pgalloc_tag_add(page, current, 1 << order);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
--- a/mm/page_ext.c~lib-introduce-support-for-page-allocation-tagging
+++ a/mm/page_ext.c
@@ -10,6 +10,7 @@
 #include <linux/page_idle.h>
 #include <linux/page_table_check.h>
 #include <linux/rcupdate.h>
+#include <linux/pgalloc_tag.h>
 
 /*
  * struct page extension
@@ -82,6 +83,9 @@ static struct page_ext_operations *page_
 #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
 	&page_idle_ops,
 #endif
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+	&page_alloc_tagging_ops,
+#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
 	&page_table_check_ops,
 #endif
_
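
For context (an illustrative sketch, not part of the patch; the my_page_stamp
names are hypothetical): the tag reference lives in the page_ext area, so
page_alloc_tagging_ops is an ordinary page_ext client.  The page_ext core
sums the .size of every enabled client, fills in each client's .offset, and
the per-page slot is then reached with plain pointer arithmetic, which is
exactly what codetag_ref_from_page_ext() does above.  A hypothetical
additional client would follow the same pattern (it would also need an
entry in the page_ext_ops[] array in mm/page_ext.c, as this patch adds for
page_alloc_tagging_ops):

  #include <linux/mm_types.h>
  #include <linux/page_ext.h>

  struct my_page_stamp {                  /* hypothetical per-page payload */
          unsigned long when;
  };

  static __init bool need_my_page_stamp(void)
  {
          return true;                    /* could test a boot parameter instead */
  }

  struct page_ext_operations my_page_stamp_ops = {
          .size = sizeof(struct my_page_stamp),
          .need = need_my_page_stamp,
  };

  /* find this client's slot for a given page; pair with the put below */
  static struct my_page_stamp *get_my_page_stamp(struct page *page)
  {
          struct page_ext *page_ext = page_ext_get(page);

          if (!page_ext)
                  return NULL;
          return (void *)page_ext + my_page_stamp_ops.offset;
  }

  static void put_my_page_stamp(struct my_page_stamp *stamp)
  {
          page_ext_put((void *)stamp - my_page_stamp_ops.offset);
  }

As with get_page_tag_ref()/put_page_tag_ref() above, every successful
lookup must be paired with page_ext_put(), since page_ext_get() pins the
page_ext storage until the matching put.
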
Patches currently in -mm which might be from surenb@xxxxxxxxxx are

mm-introduce-slabobj_ext-to-support-slab-object-extensions.patch
mm-introduce-__gfp_no_obj_ext-flag-to-selectively-prevent-slabobj_ext-creation.patch
mm-slab-introduce-slab_no_obj_ext-to-avoid-obj_ext-creation.patch
slab-objext-introduce-objext_flags-as-extension-to-page_memcg_data_flags.patch
lib-code-tagging-framework.patch
lib-code-tagging-module-support.patch
lib-prevent-module-unloading-if-memory-is-not-freed.patch
lib-add-allocation-tagging-support-for-memory-allocation-profiling.patch
lib-introduce-support-for-page-allocation-tagging.patch
lib-introduce-early-boot-parameter-to-avoid-page_ext-memory-overhead.patch
mm-percpu-increase-percpu_module_reserve-to-accommodate-allocation-tags.patch
change-alloc_pages-name-in-dma_map_ops-to-avoid-name-conflicts.patch
mm-enable-page-allocation-tagging.patch
mm-create-new-codetag-references-during-page-splitting.patch
mm-fix-non-compound-multi-order-memory-accounting-in-__free_pages.patch
mm-page_ext-enable-early_page_ext-when-config_mem_alloc_profiling_debug=y.patch
lib-add-codetag-reference-into-slabobj_ext.patch
mm-slab-add-allocation-accounting-into-slab-allocation-and-free-paths.patch
mm-slab-enable-slab-allocation-tagging-for-kmalloc-and-friends.patch
mm-percpu-enable-per-cpu-allocation-tagging.patch
lib-add-memory-allocations-report-in-show_mem.patch
codetag-debug-skip-objext-checking-when-its-for-objext-itself.patch
codetag-debug-mark-codetags-for-reserved-pages-as-empty.patch
codetag-debug-introduce-objexts_alloc_fail-to-mark-failed-slab_ext-allocations.patch