Allocate and release memory to store obj_cgroup pointers for each
non-root slab page. Reuse the page->mem_cgroup pointer to store a
pointer to the allocated space.

To distinguish between obj_cgroups and memcg pointers when it's not
obvious which one is used (as in page_cgroup_ino()), let's always set
the lowest bit in the obj_cgroup case.

Signed-off-by: Roman Gushchin <guro@xxxxxx>
---
 include/linux/mm_types.h |  5 ++++-
 include/linux/slab_def.h |  5 +++++
 include/linux/slub_def.h |  2 ++
 mm/memcontrol.c          | 17 +++++++++++---
 mm/slab.h                | 48 ++++++++++++++++++++++++++++++++++++++++
 mm/slub.c                |  5 +++++
 6 files changed, 78 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..0ad7e700f26d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -198,7 +198,10 @@ struct page {
 	atomic_t _refcount;
 
 #ifdef CONFIG_MEMCG
-	struct mem_cgroup *mem_cgroup;
+	union {
+		struct mem_cgroup *mem_cgroup;
+		struct obj_cgroup **obj_cgroups;
+	};
 #endif
 
 	/*
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index abc7de77b988..967a9a525eab 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -114,4 +114,9 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
+static inline int objs_per_slab(const struct kmem_cache *cache)
+{
+	return cache->num;
+}
+
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 200ea292f250..cbda7d55796a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -191,4 +191,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 					cache->reciprocal_size);
 }
 
+extern int objs_per_slab(struct kmem_cache *cache);
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7f87a0eeafec..63826e460b3f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -549,10 +549,21 @@ ino_t page_cgroup_ino(struct page *page)
 	unsigned long ino = 0;
 
 	rcu_read_lock();
-	if (PageSlab(page) && !PageTail(page))
+	if (PageSlab(page) && !PageTail(page)) {
 		memcg = memcg_from_slab_page(page);
-	else
-		memcg = READ_ONCE(page->mem_cgroup);
+	} else {
+		memcg = page->mem_cgroup;
+
+		/*
+		 * The lowest bit set means that memcg isn't a valid
+		 * memcg pointer, but an obj_cgroups pointer.
+		 * In this case the page is shared and doesn't belong
+		 * to any specific memory cgroup.
+		 */
+		if ((unsigned long) memcg & 0x1UL)
+			memcg = NULL;
+	}
+
 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 		memcg = parent_mem_cgroup(memcg);
 	if (memcg)
diff --git a/mm/slab.h b/mm/slab.h
index 8a574d9361c1..44def57f050e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -319,6 +319,18 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s->memcg_params.root_cache;
 }
 
+static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
+{
+	/*
+	 * page->mem_cgroup and page->obj_cgroups are sharing the same
+	 * space. To distinguish between them when we don't know for sure
+	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
+	 * always set the lowest bit of obj_cgroups.
+	 */
+	return (struct obj_cgroup **)
+		((unsigned long)page->obj_cgroups & ~0x1UL);
+}
+
 /*
  * Expects a pointer to a slab page. Please note, that PageSlab() check
  * isn't sufficient, as it returns true also for tail compound slab pages,
@@ -406,6 +418,25 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
 }
 
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+					       unsigned int objects)
+{
+	void *vec;
+
+	vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
+	if (!vec)
+		return -ENOMEM;
+
+	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
+	return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+	kfree(page_obj_cgroups(page));
+	page->obj_cgroups = NULL;
+}
+
 extern void slab_init_memcg_params(struct kmem_cache *);
 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
 
@@ -455,6 +486,16 @@ static inline void memcg_uncharge_slab(struct page *page, int order,
 {
 }
 
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+					       unsigned int objects)
+{
+	return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
@@ -481,12 +522,18 @@ static __always_inline int charge_slab_page(struct page *page,
 					    gfp_t gfp, int order,
 					    struct kmem_cache *s)
 {
+	int ret;
+
 	if (is_root_cache(s)) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 				    PAGE_SIZE << order);
 		return 0;
 	}
 
+	ret = memcg_alloc_page_obj_cgroups(page, gfp, objs_per_slab(s));
+	if (ret)
+		return ret;
+
 	return memcg_charge_slab(page, gfp, order, s);
 }
 
@@ -499,6 +546,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
 		return;
 	}
 
+	memcg_free_page_obj_cgroups(page);
 	memcg_uncharge_slab(page, order, s);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 8d16babe1829..68c2c45dfac1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5992,4 +5992,9 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 {
 	return -EIO;
 }
 #endif /* CONFIG_SLUB_DEBUG */
+
+int objs_per_slab(struct kmem_cache *cache)
+{
+	return oo_objects(cache->oo);
+}
-- 
2.25.3
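
For reference, a minimal standalone sketch of the lowest-bit tagging
scheme the patch uses (illustrative userspace code with made-up names,
not part of the patch itself). It relies on allocations being at least
word-aligned, so bit 0 of a valid pointer is always clear and can be
borrowed as a type tag:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct obj_cgroup;

/* Store: set bit 0 to mark the word as an obj_cgroups vector. */
static void *tag_obj_cgroups(struct obj_cgroup **vec)
{
	return (void *)((uintptr_t)vec | 0x1UL);
}

/* Load: clear bit 0 to recover the real vector address. */
static struct obj_cgroup **untag_obj_cgroups(void *word)
{
	return (struct obj_cgroup **)((uintptr_t)word & ~0x1UL);
}

/*
 * A reader that may see either pointer type checks bit 0 first,
 * the way the page_cgroup_ino() hunk above does.
 */
static int is_obj_cgroups(void *word)
{
	return (uintptr_t)word & 0x1UL;
}

int main(void)
{
	struct obj_cgroup **vec = calloc(16, sizeof(*vec));
	void *word = tag_obj_cgroups(vec);

	assert(is_obj_cgroups(word));
	assert(untag_obj_cgroups(word) == vec);

	free(vec);
	return 0;
}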
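
The per-page vector is sized by objs_per_slab(), and an object's slot
in it is derived from the object's offset within the slab page, which
is what obj_to_index() computes (the kernel avoids the runtime division
with a precomputed reciprocal; plain division and illustrative names
are used in this sketch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for obj_to_index(): slot = offset / object size. */
static unsigned int obj_index(void *slab_base, size_t obj_size, void *obj)
{
	return ((uintptr_t)obj - (uintptr_t)slab_base) / obj_size;
}

int main(void)
{
	char slab[4096];

	/* The third 256-byte object of the page lands in slot 2. */
	assert(obj_index(slab, 256, slab + 2 * 256) == 2);
	return 0;
}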