This commit implements the SLUB version of the obj_to_index() function,
which will be required to calculate the offset of memcg_ptr in the
mem_cgroup_vec to store/obtain the memcg ownership data.

To make it faster, let's repeat SLAB's trick introduced by commit
6a2d7a955d8d ("[PATCH] SLAB: use a multiply instead of a divide in
obj_to_index()") and avoid an expensive division.

Signed-off-by: Roman Gushchin <guro@xxxxxx>
---
 include/linux/slub_def.h | 9 +++++++++
 mm/slub.c                | 1 +
 2 files changed, 10 insertions(+)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..200ea292f250 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -8,6 +8,7 @@
  * (C) 2007 SGI, Christoph Lameter
  */
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	unsigned int size;	/* The size of an object including metadata */
 	unsigned int object_size;/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
 	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
@@ -182,4 +184,11 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
 	return result;
 }
 
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - page_address(page),
+				 cache->reciprocal_size);
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slub.c b/mm/slub.c
index 3014158c100d..b043cfb673c9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3587,6 +3587,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	size = ALIGN(size, s->align);
 	s->size = size;
+	s->reciprocal_size = reciprocal_value(size);
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-- 
2.21.0
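
[Editor's note, not part of the patch: a minimal sketch of the
reciprocal-divide pattern the change relies on, assuming only the
existing include/linux/reciprocal_div.h API. The local names "off",
"R" and "index" below are hypothetical and purely illustrative.]

	#include <linux/reciprocal_div.h>

	/* Precompute the reciprocal once, when the divisor (here the
	 * object size) becomes known, e.g. in calculate_sizes().
	 */
	struct reciprocal_value R = reciprocal_value(cache->size);

	/* Each "off / cache->size" on the hot path is then replaced by
	 * reciprocal_divide(), which expands to a multiply plus shifts
	 * instead of an integer division.
	 */
	unsigned int index = reciprocal_divide(off, R);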