SLUB sets the minimum number of partial slabs per node (min_partial)
using set_min_partial(). SLUB keeps at least min_partial slabs on a
node's partial list, even if they are empty, to avoid excessive use of
the page allocator. set_min_partial() clamps the value of min_partial
between MIN_PARTIAL and MAX_PARTIAL.

As set_min_partial() can also be called from min_partial_store(), do
the clamping only in kmem_cache_open() so that a user can later change
min_partial to whatever value they want.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 mm/slub.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 3a4458976ab7..a4964deccb61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4002,10 +4002,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 
 static void set_min_partial(struct kmem_cache *s, unsigned long min)
 {
-	if (min < MIN_PARTIAL)
-		min = MIN_PARTIAL;
-	else if (min > MAX_PARTIAL)
-		min = MAX_PARTIAL;
 	s->min_partial = min;
 }
 
@@ -4184,6 +4180,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 {
+	int min_partial;
+
 	s->flags = kmem_cache_flags(s->size, flags, s->name);
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	s->random = get_random_long();
@@ -4215,7 +4213,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	 * The larger the object size is, the more slabs we want on the partial
 	 * list to avoid pounding the page allocator excessively.
 	 */
-	set_min_partial(s, ilog2(s->size) / 2);
+	min_partial = min(MAX_PARTIAL, ilog2(s->size) / 2);
+	min_partial = max(MIN_PARTIAL, min_partial);
+
+	set_min_partial(s, min_partial);
 
 	set_cpu_partial(s);
 
-- 
2.33.1
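
For illustration, here is a minimal userspace sketch of the clamping
that kmem_cache_open() performs after this patch. It is not kernel
code: MIN_PARTIAL and MAX_PARTIAL here mirror the constants defined in
mm/slub.c, ilog2() is a stand-in for the kernel helper, and the two
if-statements stand in for the kernel's min()/max() macros.

#include <stdio.h>

#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

/* Integer log2: index of the highest set bit (stand-in for ilog2()). */
static int ilog2(unsigned long n)
{
	int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned long sizes[] = { 8, 192, 4096, 2 * 1024 * 1024 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int min_partial = ilog2(sizes[i]) / 2;

		/*
		 * Clamp into [MIN_PARTIAL, MAX_PARTIAL]. After this patch
		 * the clamp lives only here (kmem_cache_open()), so a
		 * sysfs write via min_partial_store() is no longer
		 * restricted to this range.
		 */
		if (min_partial > MAX_PARTIAL)
			min_partial = MAX_PARTIAL;
		if (min_partial < MIN_PARTIAL)
			min_partial = MIN_PARTIAL;

		printf("size %8lu -> min_partial %d\n",
		       sizes[i], min_partial);
	}
	return 0;
}

With these constants, every cache up to a 1024-byte object size ends up
at min_partial = 5, and only very large objects approach the ceiling of
10; the clamp matters mostly at the extremes, which is why moving it
out of set_min_partial() does not change the defaults computed at cache
creation.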