The patch titled
     Subject: mm: memcg: plumbing memcg for kmem cache allocations
has been added to the -mm tree.  Its filename is
     mm-memcg-plumbing-memcg-for-kmem-cache-allocations.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-plumbing-memcg-for-kmem-cache-allocations.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-plumbing-memcg-for-kmem-cache-allocations.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Shakeel Butt <shakeelb@xxxxxxxxxx>
Subject: mm: memcg: plumbing memcg for kmem cache allocations

Patch series "Directed kmem charging", v2.

This patchset introduces memcg-variant memory allocation functions.  The
caller can explicitly pass the memcg to charge for kmem allocations.
Currently, for __GFP_ACCOUNT memory allocation requests, the kernel
extracts the memcg of the current task to charge for the kmem allocation.
This patch series introduces kmem allocation functions where the caller
can pass a pointer to a remote memcg.  The remote memcg will be charged
for the allocation instead of the memcg of the caller.  However, the
caller must hold a reference to the remote memcg.

This patch (of 3):

Introduce the memcg variant for kmem cache allocation functions.
Currently, for __GFP_ACCOUNT allocations, the kernel switches the root
kmem cache with the memcg-specific kmem cache to charge those allocations
to the memcg.  However, the memcg to charge is extracted from the current
task_struct.  This patch introduces variants of the kmem cache allocation
functions where the caller provides the memcg explicitly, instead of it
being deduced from the current task.

These functions are useful for use-cases where the allocations should be
charged to a memcg different from the memcg of the caller.  One concrete
use-case is the allocation of fsnotify event objects, which should be
charged to the listener instead of the producer.

To call these functions, the caller must hold a reference to the memcg
being charged.
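For illustration, here is a minimal sketch (not part of this patch) of how
a caller might use the new interface.  The names event_cachep,
listener_memcg and struct my_event are hypothetical:

	/*
	 * Charge the allocation to a remote (listener's) memcg instead
	 * of the allocating task's memcg.  The caller is assumed to
	 * already hold a reference on listener_memcg, e.g. taken via
	 * css_get().
	 */
	struct my_event *event;

	event = kmem_cache_alloc_memcg(event_cachep, GFP_KERNEL,
				       listener_memcg);
	if (!event)
		return -ENOMEM;

Note that passing a non-NULL memcg routes the allocation through the
memcg's kmem cache even without __GFP_ACCOUNT (see the
slab_pre_alloc_hook() change below), and that memcg_kmem_get_cache()
falls back to the current task's memcg if the passed memcg's css can no
longer be pinned.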
Link: http://lkml.kernel.org/r/20180221030101.221206-2-shakeelb@xxxxxxxxxx
Signed-off-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Amir Goldstein <amir73il@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Greg Thelen <gthelen@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memcontrol.h |    3 -
 include/linux/slab.h       |   41 ++++++++++++++++++
 mm/memcontrol.c            |   18 ++++++--
 mm/slab.c                  |   78 ++++++++++++++++++++++++++++++-----
 mm/slab.h                  |    6 +-
 mm/slob.c                  |    7 +++
 mm/slub.c                  |   77 ++++++++++++++++++++++++++++------
 7 files changed, 199 insertions(+), 31 deletions(-)

diff -puN include/linux/memcontrol.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations include/linux/memcontrol.h
--- a/include/linux/memcontrol.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/include/linux/memcontrol.h
@@ -1140,7 +1140,8 @@ static inline bool mem_cgroup_under_sock
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+					struct mem_cgroup *memcg);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
diff -puN include/linux/slab.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations include/linux/slab.h
--- a/include/linux/slab.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/include/linux/slab.h
@@ -353,6 +353,8 @@ static __always_inline int kmalloc_index
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_memcg(struct kmem_cache *, gfp_t flags,
+			     struct mem_cgroup *memcg) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -377,6 +379,8 @@ static __always_inline void kfree_bulk(s
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_node_memcg(struct kmem_cache *, gfp_t flags, int node,
+				  struct mem_cgroup *memcg) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -387,15 +391,26 @@ static __always_inline void *kmem_cache_
 {
	return kmem_cache_alloc(s, flags);
 }
+
+static __always_inline void *kmem_cache_alloc_node_memcg(struct kmem_cache *s,
+	gfp_t flags, int node, struct mem_cgroup *memcg)
+{
+	return kmem_cache_alloc_memcg(s, flags, memcg);
+}
 #endif
 
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_memcg_trace(struct kmem_cache *, gfp_t, size_t,
+		struct mem_cgroup *memcg) __assume_slab_alignment __malloc;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_node_memcg_trace(struct kmem_cache *s,
+		gfp_t gfpflags, int node, size_t size,
+		struct mem_cgroup *memcg) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -404,6 +419,13 @@ kmem_cache_alloc_node_trace(struct kmem_
 {
	return kmem_cache_alloc_trace(s, gfpflags, size);
 }
+
+static __always_inline void *
+kmem_cache_alloc_node_memcg_trace(struct kmem_cache *s, gfp_t gfpflags,
+	int node, size_t size, struct mem_cgroup *memcg)
+{
+	return kmem_cache_alloc_memcg_trace(s, gfpflags, size, memcg);
+}
 #endif /* CONFIG_NUMA */
 
 #else /* CONFIG_TRACING */
@@ -416,6 +438,15 @@ static __always_inline void *kmem_cache_
	return ret;
 }
 
+static __always_inline void *kmem_cache_alloc_memcg_trace(struct kmem_cache *s,
+	gfp_t flags, size_t size, struct mem_cgroup *memcg)
+{
+	void *ret = kmem_cache_alloc_memcg(s, flags, memcg);
+
+	kasan_kmalloc(s, ret, size, flags);
+	return ret;
+}
+
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
@@ -425,6 +456,16 @@ kmem_cache_alloc_node_trace(struct kmem_
 
	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_memcg_trace(struct kmem_cache *s, gfp_t gfpflags,
+	int node, size_t size, struct mem_cgroup *memcg)
+{
+	void *ret = kmem_cache_alloc_node_memcg(s, gfpflags, node, memcg);
+
+	kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
 }
 #endif /* CONFIG_TRACING */

diff -puN mm/memcontrol.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations mm/memcontrol.c
--- a/mm/memcontrol.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/mm/memcontrol.c
@@ -701,6 +701,15 @@ static struct mem_cgroup *get_mem_cgroup
	return memcg;
 }
 
+static struct mem_cgroup *get_mem_cgroup(struct mem_cgroup *memcg)
+{
+	rcu_read_lock();
+	if (!css_tryget_online(&memcg->css))
+		memcg = NULL;
+	rcu_read_unlock();
+	return memcg;
+}
+
 /**
  * mem_cgroup_iter - iterate over memory cgroup hierarchy
  * @root: hierarchy root
@@ -2247,9 +2256,9 @@ static inline bool memcg_kmem_bypass(voi
  * done with it, memcg_kmem_put_cache() must be called to release the
  * reference.
  */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+					struct mem_cgroup *memcg)
 {
-	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;
 
@@ -2261,7 +2270,10 @@ struct kmem_cache *memcg_kmem_get_cache(
	if (current->memcg_kmem_skip_account)
		return cachep;
 
-	memcg = get_mem_cgroup_from_mm(current->mm);
+	if (memcg)
+		memcg = get_mem_cgroup(memcg);
+	if (!memcg)
+		memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;
diff -puN mm/slab.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations mm/slab.c
--- a/mm/slab.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/mm/slab.c
@@ -3276,14 +3276,14 @@ must_grow:
 
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		unsigned long caller)
+		struct mem_cgroup *memcg, unsigned long caller)
 {
	unsigned long save_flags;
	void *ptr;
	int slab_node = numa_mem_id();
 
	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, flags);
+	cachep = slab_pre_alloc_hook(cachep, flags, memcg);
	if (unlikely(!cachep))
		return NULL;
 
@@ -3356,13 +3356,14 @@ __do_cache_alloc(struct kmem_cach
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, struct mem_cgroup *memcg,
+	   unsigned long caller)
 {
	unsigned long save_flags;
	void *objp;
 
	flags &= gfp_allowed_mask;
-	cachep = slab_pre_alloc_hook(cachep, flags);
+	cachep = slab_pre_alloc_hook(cachep, flags, memcg);
	if (unlikely(!cachep))
		return NULL;
 
@@ -3536,7 +3537,7 @@ void ___cache_free(struct kmem_cache *ca
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = slab_alloc(cachep, flags, _RET_IP_);
+	void *ret = slab_alloc(cachep, flags, NULL, _RET_IP_);
 
	kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc(_RET_IP_, ret,
@@ -3546,6 +3547,19 @@ void *kmem_cache_alloc(struct kmem_cache
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_alloc_memcg(struct kmem_cache *cachep, gfp_t flags,
+			     struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc(cachep, flags, memcg, _RET_IP_);
+
+	kasan_slab_alloc(cachep, ret, flags);
+	trace_kmem_cache_alloc(_RET_IP_, ret,
+			       cachep->object_size, cachep->size, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_memcg);
+
 static __always_inline void
 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
				  size_t size, void **p, unsigned long caller)
@@ -3561,7 +3575,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 {
	size_t i;
 
-	s = slab_pre_alloc_hook(s, flags);
+	s = slab_pre_alloc_hook(s, flags, NULL);
	if (!s)
		return 0;
 
@@ -3602,7 +3616,7 @@ kmem_cache_alloc_trace(struct kmem_cache
 {
	void *ret;
 
-	ret = slab_alloc(cachep, flags, _RET_IP_);
+	ret = slab_alloc(cachep, flags, NULL, _RET_IP_);
 
	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
@@ -3610,6 +3624,21 @@ kmem_cache_alloc_trace(struct kmem_cache
	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *
+kmem_cache_alloc_memcg_trace(struct kmem_cache *cachep, gfp_t flags,
+			     size_t size, struct mem_cgroup *memcg)
+{
+	void *ret;
+
+	ret = slab_alloc(cachep, flags, memcg, _RET_IP_);
+
+	kasan_kmalloc(cachep, ret, size, flags);
+	trace_kmalloc(_RET_IP_, ret,
+		      size, cachep->size, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_memcg_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -3626,7 +3655,7 @@
EXPORT_SYMBOL(kmem_cache_alloc_trace);
  */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+	void *ret = slab_alloc_node(cachep, flags, nodeid, NULL, _RET_IP_);
 
	kasan_slab_alloc(cachep, ret, flags);
	trace_kmem_cache_alloc_node(_RET_IP_, ret,
@@ -3637,6 +3666,20 @@ void *kmem_cache_alloc_node(struct kmem_
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmem_cache_alloc_node_memcg(struct kmem_cache *cachep, gfp_t flags,
+				  int nodeid, struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc_node(cachep, flags, nodeid, memcg, _RET_IP_);
+
+	kasan_slab_alloc(cachep, ret, flags);
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    cachep->object_size, cachep->size,
+				    flags, nodeid);
+
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_memcg);
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
				  gfp_t flags,
@@ -3645,7 +3688,7 @@ void *kmem_cache_alloc_node_trace(struct
 {
	void *ret;
 
-	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+	ret = slab_alloc_node(cachep, flags, nodeid, NULL, _RET_IP_);
 
	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc_node(_RET_IP_, ret,
@@ -3654,6 +3697,21 @@ void *kmem_cache_alloc_node_trace(struct
	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
+
+void *kmem_cache_alloc_node_memcg_trace(struct kmem_cache *cachep, gfp_t flags,
+		int nodeid, size_t size, struct mem_cgroup *memcg)
+{
+	void *ret;
+
+	ret = slab_alloc_node(cachep, flags, nodeid, memcg, _RET_IP_);
+
+	kasan_kmalloc(cachep, ret, size, flags);
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, cachep->size,
+			   flags, nodeid);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_memcg_trace);
 #endif
 
 static __always_inline void *
@@ -3700,7 +3758,7 @@ static __always_inline void *__do_kmallo
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
-	ret = slab_alloc(cachep, flags, caller);
+	ret = slab_alloc(cachep, flags, NULL, caller);
 
	kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(caller, ret,
diff -puN mm/slab.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations mm/slab.h
--- a/mm/slab.h~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/mm/slab.h
@@ -410,7 +410,7 @@ static inline size_t slab_ksize(const st
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-						     gfp_t flags)
+				gfp_t flags, struct mem_cgroup *memcg)
 {
	flags &= gfp_allowed_mask;
 
@@ -423,8 +423,8 @@ static inline struct kmem_cache *slab_pr
		return NULL;
 
	if (memcg_kmem_enabled() &&
-	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		return memcg_kmem_get_cache(s);
+	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT) || memcg))
+		return memcg_kmem_get_cache(s, memcg);
 
	return s;
 }
diff -puN mm/slob.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations mm/slob.c
--- a/mm/slob.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/mm/slob.c
@@ -568,6 +568,13 @@ void *kmem_cache_alloc(struct kmem_cache
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_alloc_memcg(struct kmem_cache *cachep, gfp_t flags,
+			     struct mem_cgroup *memcg)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_memcg);
+
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
diff -puN mm/slub.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations mm/slub.c
--- a/mm/slub.c~mm-memcg-plumbing-memcg-for-kmem-cache-allocations
+++ a/mm/slub.c
@@ -2641,14 +2641,15 @@ static void *__slab_alloc(struct kmem_ca
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
-		gfp_t gfpflags, int node, unsigned long addr)
+		gfp_t gfpflags, int node, struct mem_cgroup *memcg,
+		unsigned long addr)
 {
	void *object;
	struct kmem_cache_cpu *c;
	struct page *page;
	unsigned long tid;
 
-	s = slab_pre_alloc_hook(s, gfpflags);
+	s = slab_pre_alloc_hook(s, gfpflags, memcg);
	if (!s)
		return NULL;
 redo:
@@ -2727,15 +2728,15 @@ redo:
	return object;
 }
 
-static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, unsigned long addr)
+static __always_inline void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
+		struct mem_cgroup *memcg, unsigned long addr)
 {
-	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, memcg, addr);
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NULL, _RET_IP_);
 
	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
				s->size, gfpflags);
@@ -2744,21 +2745,44 @@ void *kmem_cache_alloc(struct kmem_cache
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void *kmem_cache_alloc_memcg(struct kmem_cache *s, gfp_t gfpflags,
+			     struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc(s, gfpflags, memcg, _RET_IP_);
+
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+			       s->size, gfpflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_memcg);
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NULL, _RET_IP_);
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmem_cache_alloc_memcg_trace(struct kmem_cache *s, gfp_t gfpflags,
+		size_t size, struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc(s, gfpflags, memcg, _RET_IP_);
+
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_memcg_trace);
 #endif
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, NULL, _RET_IP_);
 
	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->object_size, s->size, gfpflags, node);
@@ -2767,12 +2791,24 @@ void *kmem_cache_alloc_node(struct kmem_
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmem_cache_alloc_node_memcg(struct kmem_cache *s, gfp_t gfpflags,
+		int node, struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc_node(s, gfpflags, node, memcg, _RET_IP_);
+
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    s->object_size, s->size, gfpflags, node);
+
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_memcg);
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
		    gfp_t gfpflags,
		    int node, size_t size)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, NULL, _RET_IP_);
 
	trace_kmalloc_node(_RET_IP_, ret,
			   size, s->size, gfpflags, node);
@@ -2781,6 +2817,19 @@ void *kmem_cache_alloc_node_trace(struct
	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
+
+void *kmem_cache_alloc_node_memcg_trace(struct kmem_cache *s, gfp_t gfpflags,
+		int node, size_t size, struct mem_cgroup *memcg)
+{
+	void *ret = slab_alloc_node(s, gfpflags, node, memcg, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_memcg_trace);
 #endif
 #endif
 
@@ -3109,7 +3158,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
	int i;
 
	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, flags);
+	s = slab_pre_alloc_hook(s, flags, NULL);
	if (unlikely(!s))
		return false;
	/*
@@ -3755,7 +3804,7 @@ void *__kmalloc(size_t size, gfp_t flags
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
 
-	ret = slab_alloc(s, flags, _RET_IP_);
+	ret = slab_alloc(s, flags, NULL, _RET_IP_);
 
	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3800,7 +3849,7 @@ void *__kmalloc_node(size_t size, gfp_t
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
 
-	ret = slab_alloc_node(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, NULL, _RET_IP_);
 
	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
@@ -4305,7 +4354,7 @@ void *__kmalloc_track_caller(size_t size
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
 
-	ret = slab_alloc(s, gfpflags, caller);
+	ret = slab_alloc(s, gfpflags, NULL, caller);
 
	/* Honor the call site pointer we received. */
	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4335,7 +4384,7 @@ void *__kmalloc_node_track_caller(size_t
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;
 
-	ret = slab_alloc_node(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, NULL, caller);
 
	/* Honor the call site pointer we received. */
	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
_

Patches currently in -mm which might be from shakeelb@xxxxxxxxxx are

mm-mlock-vmscan-no-more-skipping-pagevecs.patch
mm-memcg-plumbing-memcg-for-kmem-cache-allocations.patch
mm-memcg-plumbing-memcg-for-kmalloc-allocations.patch
fs-fsnotify-account-fsnotify-metadata-to-kmemcg.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html