Redefine __alloc_percpu, __alloc_percpu_gfp and __alloc_reserved_percpu
to record allocations and deallocations done by these functions.

Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
 include/linux/percpu.h | 19 ++++++++----
 mm/percpu.c            | 66 +++++-------------------------------------
 2 files changed, 22 insertions(+), 63 deletions(-)

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1338ea2aa720..51ec257379af 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,12 +2,14 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/alloc_tag.h>
 #include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/pfn.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 #include <asm/percpu.h>
 
@@ -116,7 +118,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
 #endif
 
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
 extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
@@ -124,10 +125,15 @@ extern bool is_kernel_percpu_address(unsigned long addr);
 extern void __init setup_per_cpu_areas(void);
 #endif
 
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
-extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+extern void __percpu *__pcpu_alloc(size_t size, size_t align, bool reserved,
+				   gfp_t gfp) __alloc_size(1);
+
+#define __alloc_percpu_gfp(_size, _align, _gfp)	alloc_hooks(		\
+	__pcpu_alloc(_size, _align, false, _gfp), void __percpu *, NULL)
+#define __alloc_percpu(_size, _align)		alloc_hooks(		\
+	__pcpu_alloc(_size, _align, false, GFP_KERNEL), void __percpu *, NULL)
+#define __alloc_reserved_percpu(_size, _align)	alloc_hooks(		\
+	__pcpu_alloc(_size, _align, true, GFP_KERNEL), void __percpu *, NULL)
 
 #define alloc_percpu_gfp(type, gfp)					\
 	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
@@ -136,6 +142,9 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
 						__alignof__(type))
 
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 extern unsigned long pcpu_nr_pages(void);
 
 #endif /* __LINUX_PERCPU_H */
diff --git a/mm/percpu.c b/mm/percpu.c
index 4e2592f2e58f..4b5cf260d8e0 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1728,7 +1728,7 @@ static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t s
 #endif
 
 /**
- * pcpu_alloc - the percpu allocator
+ * __pcpu_alloc - the percpu allocator
  * @size: size of area to allocate in bytes
  * @align: alignment of area (max PAGE_SIZE)
  * @reserved: allocate from the reserved chunk if available
@@ -1742,8 +1742,8 @@ static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t s
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
-				 gfp_t gfp)
+void __percpu *__pcpu_alloc(size_t size, size_t align, bool reserved,
+			    gfp_t gfp)
 {
 	gfp_t pcpu_gfp;
 	bool is_atomic;
@@ -1909,6 +1909,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
+	pcpu_alloc_tag_alloc_hook(chunk, off, size);
+
 	return ptr;
 
 fail_unlock:
@@ -1935,61 +1937,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	return NULL;
 }
-
-/**
- * __alloc_percpu_gfp - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- * @gfp: allocation flags
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align. If
- * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail. If @gfp
- * has __GFP_NOWARN then no warning will be triggered on invalid or failed
- * allocation requests.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
-{
-	return pcpu_alloc(size, align, false, gfp);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
-
-/**
- * __alloc_percpu - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
- */
-void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, false, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * __alloc_reserved_percpu - allocate reserved percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align
- * from reserved percpu area if arch has set it up; otherwise,
- * allocation is served from the same dynamic area. Might sleep.
- * Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, true, GFP_KERNEL);
-}
+EXPORT_SYMBOL_GPL(__pcpu_alloc);
 
 /**
  * pcpu_balance_free - manage the amount of free chunks
@@ -2299,6 +2247,8 @@ void free_percpu(void __percpu *ptr)
 
 	size = pcpu_free_area(chunk, off);
 
+	pcpu_alloc_tag_free_hook(chunk, off, size);
+
 	pcpu_memcg_free_hook(chunk, off, size);
 
 	/*
-- 
2.40.1.495.gc816e09b53d-goog
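
A usage note, not part of the patch to be applied: because __alloc_percpu(),
__alloc_percpu_gfp() and __alloc_reserved_percpu() become macros expanding to
alloc_hooks(__pcpu_alloc(...)), existing call sites compile unchanged, and each
call site gets its own allocation accounting once allocation tagging is enabled
in the kernel config. The sketch below is a hypothetical example; the foo_stats
module, structure and names are invented purely for illustration.

#include <linux/module.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU data, for illustration only. */
struct foo_stats {
	u64 events;
};

static struct foo_stats __percpu *foo_stats;

static int __init foo_init(void)
{
	/*
	 * alloc_percpu() expands to __alloc_percpu(), which after this patch
	 * is a macro around __pcpu_alloc(); the alloc_hooks() wrapper is what
	 * attributes the allocation to this call site.
	 */
	foo_stats = alloc_percpu(struct foo_stats);
	if (!foo_stats)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* free_percpu() now also unwinds the per-call-site accounting. */
	free_percpu(foo_stats);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");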