The quilt patch titled
     Subject: mm: percpu: enable per-cpu allocation tagging
has been removed from the -mm tree.  Its filename was
     mm-percpu-enable-per-cpu-allocation-tagging.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Subject: mm: percpu: enable per-cpu allocation tagging
Date: Thu, 21 Mar 2024 09:36:51 -0700

Redefine __alloc_percpu, __alloc_percpu_gfp and __alloc_reserved_percpu
to record allocations and deallocations done by these functions.

[surenb@xxxxxxxxxx: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-6-surenb@xxxxxxxxxx
Link: https://lkml.kernel.org/r/20240321163705.3067592-30-surenb@xxxxxxxxxx
Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Tested-by: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Alex Gaynor <alex.gaynor@xxxxxxxxx>
Cc: Alice Ryhl <aliceryhl@xxxxxxxxxx>
Cc: Andreas Hindborg <a.hindborg@xxxxxxxxxxx>
Cc: Benno Lossin <benno.lossin@xxxxxxxxx>
Cc: "Björn Roy Baron" <bjorn3_gh@xxxxxxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Dennis Zhou <dennis@xxxxxxxxxx>
Cc: Gary Guo <gary@xxxxxxxxxxx>
Cc: Miguel Ojeda <ojeda@xxxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Wedson Almeida Filho <wedsonaf@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/percpu.h |   23 ++++++++++----
 mm/percpu.c            |   62 +++------------------------------------
 2 files changed, 22 insertions(+), 63 deletions(-)

--- a/include/linux/percpu.h~mm-percpu-enable-per-cpu-allocation-tagging
+++ a/include/linux/percpu.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/alloc_tag.h>
 #include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
@@ -9,6 +10,7 @@
 #include <linux/pfn.h>
 #include <linux/init.h>
 #include <linux/cleanup.h>
+#include <linux/sched.h>
 
 #include <asm/percpu.h>
 
@@ -125,7 +127,6 @@ extern int __init pcpu_page_first_chunk(
 				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
 #endif
 
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
 extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
@@ -133,14 +134,16 @@ extern bool is_kernel_percpu_address(uns
 extern void __init setup_per_cpu_areas(void);
 #endif
 
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
-extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
-extern void free_percpu(void __percpu *__pdata);
+extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
+				   gfp_t gfp) __alloc_size(1);
 extern size_t pcpu_alloc_size(void __percpu *__pdata);
 
-DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
-
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+#define __alloc_percpu_gfp(_size, _align, _gfp)				\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
+#define __alloc_percpu(_size, _align)					\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL))
+#define __alloc_reserved_percpu(_size, _align)				\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL))
 
 #define alloc_percpu_gfp(type, gfp)					\
 	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
@@ -149,6 +152,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(v
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
 						__alignof__(type))
 
+extern void free_percpu(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 extern unsigned long pcpu_nr_pages(void);
 
 #endif /* __LINUX_PERCPU_H */
--- a/mm/percpu.c~mm-percpu-enable-per-cpu-allocation-tagging
+++ a/mm/percpu.c
@@ -1740,7 +1740,7 @@ static void pcpu_alloc_tag_free_hook(str
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
 {
	gfp_t pcpu_gfp;
@@ -1907,6 +1907,8 @@ area_found:
 
	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
+	pcpu_alloc_tag_alloc_hook(chunk, off, size);
+
	return ptr;
 
 fail_unlock:
@@ -1935,61 +1937,7 @@ fail:
 
	return NULL;
 }
-
-/**
- * __alloc_percpu_gfp - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- * @gfp: allocation flags
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
- * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail. If @gfp
- * has __GFP_NOWARN then no warning will be triggered on invalid or failed
- * allocation requests.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
-{
-	return pcpu_alloc(size, align, false, gfp);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
-
-/**
- * __alloc_percpu - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
- */
-void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, false, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * __alloc_reserved_percpu - allocate reserved percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align
- * from reserved percpu area if arch has set it up; otherwise,
- * allocation is served from the same dynamic area.  Might sleep.
- * Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, true, GFP_KERNEL);
-}
+EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
 
 /**
  * pcpu_balance_free - manage the amount of free chunks
@@ -2328,6 +2276,8 @@ void free_percpu(void __percpu *ptr)
	spin_lock_irqsave(&pcpu_lock, flags);
	size = pcpu_free_area(chunk, off);
 
+	pcpu_alloc_tag_free_hook(chunk, off, size);
+
	pcpu_memcg_free_hook(chunk, off, size);
 
	/*
_

Patches currently in -mm which might be from surenb@xxxxxxxxxx are

userfaultfd-remove-write_once-when-setting-folio-index-during-uffdio_move.patch
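For illustration only (not part of the mail above): a minimal sketch of what the
change means for callers.  Existing users of alloc_percpu()/free_percpu() need no
source changes; with allocation profiling enabled, each call site is now wrapped
by alloc_hooks() around pcpu_alloc_noprof(), so the per-cpu allocation is charged
to that line of code.  The module name, struct and fields below are made up for
the example.

/* Hypothetical example module; foo_stats is an invented name. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

struct foo_stats {
	u64 hits;
	u64 misses;
};

static struct foo_stats __percpu *foo_stats;

static int __init foo_init(void)
{
	/*
	 * alloc_percpu() expands through __alloc_percpu() to
	 * alloc_hooks(pcpu_alloc_noprof(sizeof(...), __alignof__(...),
	 * false, GFP_KERNEL)), so with CONFIG_MEM_ALLOC_PROFILING this
	 * call site is recorded as the owner of the allocation.
	 */
	foo_stats = alloc_percpu(struct foo_stats);
	if (!foo_stats)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* free_percpu() releases the area and credits the same tag. */
	free_percpu(foo_stats);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");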