Introduce CONFIG_PGALLOC_TAG_REF_BITS to control the size of the page
allocation tag references. When the size is configured to be less than
a direct pointer, the tags are searched using an index stored as the
tag reference.

Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
 include/linux/alloc_tag.h   | 10 +++-
 include/linux/codetag.h     |  3 ++
 include/linux/pgalloc_tag.h | 99 +++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug           | 11 +++++
 lib/alloc_tag.c             | 51 ++++++++++++++++++-
 lib/codetag.c               |  4 +-
 mm/mm_init.c                |  1 +
 7 files changed, 175 insertions(+), 4 deletions(-)

diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 21e3098220e3..b5cf24517333 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -30,8 +30,16 @@ struct alloc_tag {
 	struct alloc_tag_counters __percpu *counters;
 } __aligned(8);
 
+struct alloc_tag_kernel_section {
+	struct alloc_tag *first_tag;
+	unsigned long count;
+};
+
 struct alloc_tag_module_section {
-	unsigned long start_addr;
+	union {
+		unsigned long start_addr;
+		struct alloc_tag *first_tag;
+	};
 	unsigned long end_addr;
 	/* used size */
 	unsigned long size;
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index fb4e7adfa746..401fc297eeda 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -13,6 +13,9 @@ struct codetag_module;
 struct seq_buf;
 struct module;
 
+#define CODETAG_SECTION_START_PREFIX	"__start_"
+#define CODETAG_SECTION_STOP_PREFIX	"__stop_"
+
 /*
  * An instance of this structure is created in a special ELF section at every
  * code location being tagged. At runtime, the special section is treated as
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index c76b629d0206..a7f8f00c118f 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -9,7 +9,18 @@
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 
+#if !defined(CONFIG_PGALLOC_TAG_REF_BITS) || CONFIG_PGALLOC_TAG_REF_BITS > 32
+#define PGALLOC_TAG_DIRECT_REF
 typedef union codetag_ref pgalloc_tag_ref;
+#else /* !defined(CONFIG_PGALLOC_TAG_REF_BITS) || CONFIG_PGALLOC_TAG_REF_BITS > 32 */
+#if CONFIG_PGALLOC_TAG_REF_BITS > 16
+typedef u32 pgalloc_tag_ref;
+#else
+typedef u16 pgalloc_tag_ref;
+#endif
+#endif /* !defined(CONFIG_PGALLOC_TAG_REF_BITS) || CONFIG_PGALLOC_TAG_REF_BITS > 32 */
+
+#ifdef PGALLOC_TAG_DIRECT_REF
 
 static inline void read_pgref(pgalloc_tag_ref *pgref, union codetag_ref *ref)
 {
@@ -20,6 +31,93 @@ static inline void write_pgref(pgalloc_tag_ref *pgref, union codetag_ref *ref)
 {
 	pgref->ct = ref->ct;
 }
+
+static inline void alloc_tag_sec_init(void) {}
+
+#else /* PGALLOC_TAG_DIRECT_REF */
+
+extern struct alloc_tag_kernel_section kernel_tags;
+
+#define CODETAG_ID_NULL		0
+#define CODETAG_ID_EMPTY	1
+#define CODETAG_ID_FIRST	2
+
+#ifdef CONFIG_MODULES
+
+extern struct alloc_tag_module_section module_tags;
+
+static inline struct codetag *get_module_ct(pgalloc_tag_ref pgref)
+{
+	return &module_tags.first_tag[pgref - kernel_tags.count].ct;
+}
+
+static inline pgalloc_tag_ref get_module_pgref(struct alloc_tag *tag)
+{
+	return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
+}
+
+#else /* CONFIG_MODULES */
+
+static inline struct codetag *get_module_ct(pgalloc_tag_ref pgref)
+{
+	pr_warn("invalid page tag reference %lu\n", (unsigned long)pgref);
+	return NULL;
+}
+
+static inline pgalloc_tag_ref get_module_pgref(struct alloc_tag *tag)
+{
+	pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
+	return CODETAG_ID_NULL;
+}
+
+#endif /* CONFIG_MODULES */
+
+static inline void read_pgref(pgalloc_tag_ref *pgref, union codetag_ref *ref)
+{
+	pgalloc_tag_ref pgref_val = *pgref;
+
+	switch (pgref_val) {
+	case (CODETAG_ID_NULL):
+		ref->ct = NULL;
+		break;
+	case (CODETAG_ID_EMPTY):
+		set_codetag_empty(ref);
+		break;
+	default:
+		pgref_val -= CODETAG_ID_FIRST;
+		ref->ct = pgref_val < kernel_tags.count ?
+			&kernel_tags.first_tag[pgref_val].ct :
+			get_module_ct(pgref_val);
+		break;
+	}
+}
+
+static inline void write_pgref(pgalloc_tag_ref *pgref, union codetag_ref *ref)
+{
+	struct alloc_tag *tag;
+
+	if (!ref->ct) {
+		*pgref = CODETAG_ID_NULL;
+		return;
+	}
+
+	if (is_codetag_empty(ref)) {
+		*pgref = CODETAG_ID_EMPTY;
+		return;
+	}
+
+	tag = ct_to_alloc_tag(ref->ct);
+	if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count) {
+		*pgref = CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);
+		return;
+	}
+
+	*pgref = get_module_pgref(tag);
+}
+
+void __init alloc_tag_sec_init(void);
+
+#endif /* PGALLOC_TAG_DIRECT_REF */
 
 #include <linux/page_ext.h>
 extern struct page_ext_operations page_alloc_tagging_ops;
@@ -197,6 +295,7 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
 static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
 static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
+static inline void alloc_tag_sec_init(void) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a30c03a66172..253f9c2028da 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1000,6 +1000,17 @@ config MEM_ALLOC_PROFILING_DEBUG
 	  Adds warnings with helpful error messages for memory allocation
 	  profiling.
 
+config PGALLOC_TAG_REF_BITS
+	int "Number of bits for page allocation tag reference (10-64)"
+	range 10 64
+	default "64"
+	depends on MEM_ALLOC_PROFILING
+	help
+	  Number of bits used to encode a page allocation tag reference.
+
+	  Smaller number results in less memory overhead but limits the number of
+	  allocations which can be tagged (including allocations from modules).
+
 source "lib/Kconfig.kasan"
 source "lib/Kconfig.kfence"
 source "lib/Kconfig.kmsan"
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 53bd3236d30b..73791aa55ab6 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -3,6 +3,7 @@
 #include <linux/execmem.h>
 #include <linux/fs.h>
 #include <linux/gfp.h>
+#include <linux/kallsyms.h>
 #include <linux/module.h>
 #include <linux/page_ext.h>
 #include <linux/pgalloc_tag.h>
@@ -151,6 +152,26 @@ static void __init procfs_init(void)
 	proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
 }
 
+#ifndef PGALLOC_TAG_DIRECT_REF
+
+#define SECTION_START(NAME)	(CODETAG_SECTION_START_PREFIX NAME)
+#define SECTION_STOP(NAME)	(CODETAG_SECTION_STOP_PREFIX NAME)
+
+struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
+
+void __init alloc_tag_sec_init(void)
+{
+	struct alloc_tag *last_codetag;
+
+	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
+					SECTION_START(ALLOC_TAG_SECTION_NAME));
+	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
+					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
+	kernel_tags.count = last_codetag - kernel_tags.first_tag;
+}
+
+#endif /* PGALLOC_TAG_DIRECT_REF */
+
 #ifdef CONFIG_MODULES
 
 static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
@@ -159,7 +180,16 @@ static struct module unloaded_mod;
 /* A dummy object used to indicate a module prepended area */
 static struct module prepend_mod;
 
-static struct alloc_tag_module_section module_tags;
+struct alloc_tag_module_section module_tags;
+
+#ifndef PGALLOC_TAG_DIRECT_REF
+static inline unsigned long alloc_tag_align(unsigned long val)
+{
+	if (val % sizeof(struct alloc_tag) == 0)
+		return val;
+	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
+}
+#endif /* PGALLOC_TAG_DIRECT_REF */
 
 static bool needs_section_mem(struct module *mod, unsigned long size)
 {
@@ -216,6 +246,21 @@ static void *reserve_module_tags(struct module *mod, unsigned long size,
 	if (!align)
 		align = 1;
 
+#ifndef PGALLOC_TAG_DIRECT_REF
+	/*
+	 * If alloc_tag size is not a multiple of the required alignment, tag
+	 * indexing does not work.
+	 */
+	if (!IS_ALIGNED(sizeof(struct alloc_tag), align)) {
+		pr_err("%s: alignment %lu is incompatible with allocation tag indexing (CONFIG_PGALLOC_TAG_REF_BITS)\n",
+			mod->name, align);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
+	if (prepend)
+		prepend = alloc_tag_align(prepend);
+#endif /* PGALLOC_TAG_DIRECT_REF */
 	mas_lock(&mas);
 repeat:
 	/* Try finding exact size and hope the start is aligned */
@@ -373,6 +418,10 @@ static int __init alloc_mod_tags_mem(void)
 		return -ENOMEM;
 
 	module_tags.end_addr = module_tags.start_addr + module_tags_mem_sz;
+#ifndef PGALLOC_TAG_DIRECT_REF
+	/* Ensure the base is alloc_tag aligned */
+	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);
+#endif
 
 	return 0;
 }
diff --git a/lib/codetag.c b/lib/codetag.c
index 60463ef4bb85..cb3aa1631417 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -149,8 +149,8 @@ static struct codetag_range get_section_range(struct module *mod,
 					       const char *section)
 {
 	return (struct codetag_range) {
-		get_symbol(mod, "__start_", section),
-		get_symbol(mod, "__stop_", section),
+		get_symbol(mod, CODETAG_SECTION_START_PREFIX, section),
+		get_symbol(mod, CODETAG_SECTION_STOP_PREFIX, section),
 	};
 }
 
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 4ba5607aaf19..231a95782455 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2650,6 +2650,7 @@ void __init mm_core_init(void)
 	report_meminit();
 	kmsan_init_shadow();
 	stack_depot_early_init();
+	alloc_tag_sec_init();
 	mem_init();
 	kmem_cache_init();
 	/*
-- 
2.46.0.469.g59c65b2a67-goog
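
A note for readers following the encoding above: the sketch below is a
minimal, standalone userspace model of the index scheme this patch
implements; it is not kernel code and not part of the patch. The struct
tag type, the stand-in arrays and the encode()/decode() helpers are
illustrative only, loosely mirroring write_pgref()/read_pgref():
reference 0 means NULL, 1 means "empty" (collapsed to NULL here for
brevity), kernel tags start at index 2, and module tags follow after
kernel_tags.count.

/*
 * Userspace model of index-based page allocation tag references
 * (illustrative only; pgref_demo.c is a hypothetical file name).
 * Build with: cc -o pgref_demo pgref_demo.c
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t pgalloc_tag_ref;	/* CONFIG_PGALLOC_TAG_REF_BITS <= 16 */

enum { ID_NULL = 0, ID_EMPTY = 1, ID_FIRST = 2 };

struct tag { const char *name; };	/* stand-in for struct alloc_tag */

/* Stand-ins for the contiguous kernel and module tag sections. */
static struct tag kernel_tags[] = { { "kmalloc" }, { "alloc_pages" } };
static struct tag module_tags[] = { { "module_alloc_site" } };
#define KERNEL_COUNT	(sizeof(kernel_tags) / sizeof(kernel_tags[0]))

/* Like write_pgref(): map a tag pointer (or NULL) to a small index. */
static pgalloc_tag_ref encode(const struct tag *t)
{
	if (!t)
		return ID_NULL;
	if (t >= kernel_tags && t < kernel_tags + KERNEL_COUNT)
		return ID_FIRST + (pgalloc_tag_ref)(t - kernel_tags);
	return ID_FIRST + KERNEL_COUNT + (pgalloc_tag_ref)(t - module_tags);
}

/* Like read_pgref(): map an index back to the tag it refers to. */
static const struct tag *decode(pgalloc_tag_ref ref)
{
	if (ref == ID_NULL || ref == ID_EMPTY)
		return NULL;
	ref -= ID_FIRST;
	return ref < KERNEL_COUNT ? &kernel_tags[ref] :
				    &module_tags[ref - KERNEL_COUNT];
}

int main(void)
{
	const struct tag *t = &module_tags[0];
	pgalloc_tag_ref ref = encode(t);

	assert(decode(ref) == t);
	printf("ref %u -> %s\n", (unsigned int)ref, decode(ref)->name);
	return 0;
}

With CONFIG_PGALLOC_TAG_REF_BITS=16, for example, the two reserved
values leave room for up to 65534 kernel plus module tags in total.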