The patch titled
     Subject: mm: kmemleak: add OBJECT_PHYS flag for objects allocated with physical address
has been added to the -mm mm-unstable branch.  Its filename is
     mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Patrick Wang <patrick.wang.shcn@xxxxxxxxx>
Subject: mm: kmemleak: add OBJECT_PHYS flag for objects allocated with physical address
Date: Thu, 9 Jun 2022 20:49:48 +0800

Patch series "mm: kmemleak: store objects allocated with physical address
separately and check when scan", v3.

The kmemleak_*_phys() interface uses "min_low_pfn" and "max_low_pfn" to
check addresses.  But on some architectures, kmemleak_*_phys() is called
before those two variables are initialized.  The following steps will be
taken:

1) Add an OBJECT_PHYS flag and rbtree for objects allocated with a
   physical address
2) Store the physical address in objects allocated with OBJECT_PHYS
3) Check the boundary when scanning instead of in kmemleak_*_phys()

This patch set will solve:
https://lore.kernel.org/r/20220527032504.30341-1-yee.lee@xxxxxxxxxxxx
https://lore.kernel.org/r/9dd08bb5-f39e-53d8-f88d-bec598a08c93@xxxxxxxxx


This patch (of 3):

Add an OBJECT_PHYS flag for objects.  This flag is used to identify
objects allocated with a physical address.  create_object_phys() is added
as well to set that flag.  Also remove the min_count argument from the
kmemleak_alloc_phys() function and assume it is 0.
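For illustration only, not part of this patch: a minimal caller-side
sketch of the new three-argument kmemleak_alloc_phys() once min_count is
gone.  The track_reserved_region() helper below is hypothetical; only the
memblock_reserve() and kmemleak_alloc_phys() calls reflect real
interfaces, and the old four-argument call is kept in a comment for
comparison.

#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/memblock.h>

/*
 * Hypothetical early-boot helper: reserve a fixed physical region and
 * register it with kmemleak.  With min_count removed, the object is
 * created with an implicit min_count of 0 (so it is never reported as
 * a leak) and carries the OBJECT_PHYS flag internally.
 */
static int __init track_reserved_region(phys_addr_t base, phys_addr_t size)
{
	int ret = memblock_reserve(base, size);

	if (ret)
		return ret;

	/* Old interface: kmemleak_alloc_phys(base, size, 0, 0); */
	kmemleak_alloc_phys(base, size, 0);
	return 0;
}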
Link: https://lkml.kernel.org/r/20220609124950.1694394-1-patrick.wang.shcn@xxxxxxxxx
Link: https://lkml.kernel.org/r/20220609124950.1694394-2-patrick.wang.shcn@xxxxxxxxx
Signed-off-by: Patrick Wang <patrick.wang.shcn@xxxxxxxxx>
Suggested-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Yee Lee <yee.lee@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 drivers/of/fdt.c                        |    2 -
 include/linux/kmemleak.h                |    4 +-
 mm/kmemleak.c                           |   44 ++++++++++++++++------
 mm/memblock.c                           |   14 +++----
 tools/testing/memblock/linux/kmemleak.h |    2 -
 5 files changed, 43 insertions(+), 23 deletions(-)

--- a/drivers/of/fdt.c~mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address
+++ a/drivers/of/fdt.c
@@ -529,7 +529,7 @@ static int __init __reserved_mem_reserve
 		pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
 			uname, &base, (unsigned long)(size / SZ_1M));
 		if (!nomap)
-			kmemleak_alloc_phys(base, size, 0, 0);
+			kmemleak_alloc_phys(base, size, 0);
 	}
 	else
 		pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",

--- a/include/linux/kmemleak.h~mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address
+++ a/include/linux/kmemleak.h
@@ -29,7 +29,7 @@ extern void kmemleak_not_leak(const void
 extern void kmemleak_ignore(const void *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
 				gfp_t gfp) __ref;
 extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
 extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
@@ -107,7 +107,7 @@ static inline void kmemleak_no_scan(cons
 {
 }
 static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
-				       int min_count, gfp_t gfp)
+				       gfp_t gfp)
 {
 }
 static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)

--- a/mm/kmemleak.c~mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address
+++ a/mm/kmemleak.c
@@ -172,6 +172,8 @@ struct kmemleak_object {
 #define OBJECT_NO_SCAN		(1 << 2)
 /* flag set to fully scan the object when scan_area allocation failed */
 #define OBJECT_FULL_SCAN	(1 << 3)
+/* flag set for object allocated with physical address */
+#define OBJECT_PHYS		(1 << 4)
 
 #define HEX_PREFIX		"    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -574,8 +576,9 @@ static int __save_stack_trace(unsigned l
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
  * memory block and add it to the object_list and object_tree_root.
  */
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
-					     int min_count, gfp_t gfp)
+static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
+					     int min_count, gfp_t gfp,
+					     bool is_phys)
 {
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
@@ -595,7 +598,7 @@ static struct kmemleak_object *create_ob
 	INIT_HLIST_HEAD(&object->area_list);
 	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
 	object->pointer = ptr;
 	object->size = kfence_ksize((void *)ptr) ?: size;
 	object->excess_ref = 0;
@@ -662,6 +665,20 @@ out:
 	return object;
 }
 
+/* Create kmemleak object which allocated with virtual address. */
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+					     int min_count, gfp_t gfp)
+{
+	return __create_object(ptr, size, min_count, gfp, false);
+}
+
+/* Create kmemleak object which allocated with physical address. */
+static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
+					     int min_count, gfp_t gfp)
+{
+	return __create_object(ptr, size, min_count, gfp, true);
+}
+
 /*
  * Mark the object as not allocated and schedule RCU freeing via put_object().
  */
@@ -728,11 +745,11 @@ static void delete_object_part(unsigned
 	start = object->pointer;
 	end = object->pointer + object->size;
 	if (ptr > start)
-		create_object(start, ptr - start, object->min_count,
-			      GFP_KERNEL);
+		__create_object(start, ptr - start, object->min_count,
+				GFP_KERNEL, object->flags & OBJECT_PHYS);
 	if (ptr + size < end)
-		create_object(ptr + size, end - ptr - size, object->min_count,
-			      GFP_KERNEL);
+		__create_object(ptr + size, end - ptr - size, object->min_count,
+				GFP_KERNEL, object->flags & OBJECT_PHYS);
 
 	__delete_object(object);
 }
@@ -1125,15 +1142,18 @@ EXPORT_SYMBOL(kmemleak_no_scan);
  *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
- * @min_count:	minimum number of references to this object.
- *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
-void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
-			       gfp_t gfp)
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
 {
+	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);
+
 	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-		kmemleak_alloc(__va(phys), size, min_count, gfp);
+		/*
+		 * Create object with OBJECT_PHYS flag and
+		 * assume min_count 0.
+		 */
+		create_object_phys((unsigned long)__va(phys), size, 0, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);

--- a/mm/memblock.c~mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address
+++ a/mm/memblock.c
@@ -1345,8 +1345,8 @@ __next_mem_pfn_range_in_zone(u64 *idx, s
  * from the regions with mirroring enabled and then retried from any
  * memory region.
  *
- * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
- * allocated boot memory block, so that it is never reported as leaks.
+ * In addition, function using kmemleak_alloc_phys for allocated boot
+ * memory block, it is never reported as leaks.
  *
  * Return:
  * Physical address of allocated memory block on success, %0 on failure.
@@ -1398,12 +1398,12 @@ done:
 	 */
 	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
 		/*
-		 * The min_count is set to 0 so that memblock allocated
-		 * blocks are never reported as leaks. This is because many
-		 * of these blocks are only referred via the physical
-		 * address which is not looked up by kmemleak.
+		 * Memblock allocated blocks are never reported as
+		 * leaks. This is because many of these blocks are
+		 * only referred via the physical address which is
+		 * not looked up by kmemleak.
 		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		kmemleak_alloc_phys(found, size, 0);
 
 	return found;
 }

--- a/tools/testing/memblock/linux/kmemleak.h~mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address
+++ a/tools/testing/memblock/linux/kmemleak.h
@@ -7,7 +7,7 @@ static inline void kmemleak_free_part_ph
 }
 
 static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
-				       int min_count, gfp_t gfp)
+				       gfp_t gfp)
 {
 }
 
_

Patches currently in -mm which might be from patrick.wang.shcn@xxxxxxxxx are

mm-kmemleak-add-object_phys-flag-for-objects-allocated-with-physical-address.patch
mm-kmemleak-add-rbtree-and-store-physical-address-for-objects-allocated-with-pa.patch
mm-kmemleak-check-physical-address-when-scan.patch