Now that the partial counters are ready, let's use them directly and get rid of count_partial().

Tested-by: James Wang <jnwang@xxxxxxxxxxxxxxxxx>
Reviewed-by: Pekka Enberg <penberg@xxxxxxxxxx>
Signed-off-by: Xunlei Pang <xlpang@xxxxxxxxxxxxxxxxx>
---
 mm/slub.c | 54 ++++++++++++++++++++++--------------------------------
 1 file changed, 22 insertions(+), 32 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4d02831..3f76b57 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2533,11 +2533,6 @@ static inline int node_match(struct page *page, int node)
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
 {
 	return atomic_long_read(&n->total_objects);
@@ -2545,19 +2540,26 @@ static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
 #endif /* CONFIG_SLUB_DEBUG */
 
 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
+enum partial_item { PARTIAL_FREE, PARTIAL_INUSE, PARTIAL_TOTAL };
+
+static unsigned long partial_counter(struct kmem_cache_node *n,
+					enum partial_item item)
 {
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
+	unsigned long ret = 0;
 
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, slab_list)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
+	if (item == PARTIAL_FREE) {
+		ret = atomic_long_read(&n->partial_free_objs);
+	} else if (item == PARTIAL_TOTAL) {
+		ret = n->partial_total_objs;
+	} else if (item == PARTIAL_INUSE) {
+		ret = n->partial_total_objs - atomic_long_read(&n->partial_free_objs);
+		if ((long)ret < 0)
+			ret = 0;
+	}
+
+	return ret;
 }
+
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
 
 static noinline void
@@ -2587,7 +2589,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 		unsigned long nr_objs;
 		unsigned long nr_free;
 
-		nr_free = count_partial(n, count_free);
+		nr_free = partial_counter(n, PARTIAL_FREE);
 		nr_slabs = node_nr_slabs(n);
 		nr_objs = node_nr_objs(n);
 
@@ -4643,18 +4645,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
-#ifdef CONFIG_SYSFS
-static int count_inuse(struct page *page)
-{
-	return page->inuse;
-}
-
-static int count_total(struct page *page)
-{
-	return page->objects;
-}
-#endif
-
 #ifdef CONFIG_SLUB_DEBUG
 static void validate_slab(struct kmem_cache *s, struct page *page)
 {
@@ -5091,7 +5081,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				x = atomic_long_read(&n->total_objects);
 			else if (flags & SO_OBJECTS)
 				x = atomic_long_read(&n->total_objects) -
-					count_partial(n, count_free);
+					partial_counter(n, PARTIAL_FREE);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5105,9 +5095,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 		for_each_kmem_cache_node(s, node, n) {
 			if (flags & SO_TOTAL)
-				x = count_partial(n, count_total);
+				x = partial_counter(n, PARTIAL_TOTAL);
 			else if (flags & SO_OBJECTS)
-				x = count_partial(n, count_inuse);
+				x = partial_counter(n, PARTIAL_INUSE);
 			else
 				x = n->nr_partial;
 			total += x;
@@ -5873,7 +5863,7 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 	for_each_kmem_cache_node(s, node, n) {
 		nr_slabs += node_nr_slabs(n);
 		nr_objs += node_nr_objs(n);
-		nr_free += count_partial(n, count_free);
+		nr_free += partial_counter(n, PARTIAL_FREE);
 	}
 
 	sinfo->active_objs = nr_objs - nr_free;
-- 
1.8.3.1
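
As a side note for readers, below is a minimal user-space sketch of how the three readouts relate. It is illustrative only: the struct, field types and sample values are made up for the example, and it deliberately ignores the atomics and locking of the real kernel code, but it shows the PARTIAL_FREE/PARTIAL_INUSE/PARTIAL_TOTAL semantics and why PARTIAL_INUSE is clamped at zero when a racy snapshot happens to see free > total.

#include <stdio.h>

enum partial_item { PARTIAL_FREE, PARTIAL_INUSE, PARTIAL_TOTAL };

/* Stand-in for the per-node counters; types simplified from atomic_long_t. */
struct node_counters {
	long partial_free_objs;		/* free objects on partial slabs */
	long partial_total_objs;	/* all objects on partial slabs */
};

static unsigned long partial_counter(const struct node_counters *n,
				     enum partial_item item)
{
	unsigned long ret = 0;

	if (item == PARTIAL_FREE) {
		ret = n->partial_free_objs;
	} else if (item == PARTIAL_TOTAL) {
		ret = n->partial_total_objs;
	} else if (item == PARTIAL_INUSE) {
		ret = n->partial_total_objs - n->partial_free_objs;
		if ((long)ret < 0)	/* racy snapshot saw free > total */
			ret = 0;
	}

	return ret;
}

int main(void)
{
	struct node_counters n = {
		.partial_free_objs  = 12,
		.partial_total_objs = 40,
	};

	printf("free=%lu inuse=%lu total=%lu\n",
	       partial_counter(&n, PARTIAL_FREE),
	       partial_counter(&n, PARTIAL_INUSE),
	       partial_counter(&n, PARTIAL_TOTAL));
	return 0;
}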