On 9/20/23 15:36, Feng Tang wrote: > On Fri, Sep 08, 2023 at 10:53:07PM +0800, Vlastimil Babka wrote: >> After the previous cleanups, we can now move some code from >> calc_slab_order() to calculate_order() so it's executed just once, and >> do some more cleanups. >> >> - move the min_order and MAX_OBJS_PER_PAGE evaluation to >> calc_slab_order(). > > Nit: here is to 'move ... to calculate_order()'? Oops, right, fixed. > I tried this patch series with normal boot on a desktop and one 2 > socket server: patch 2/4 doesn't change order of any slab, and patch > 3/4 does make the slab order of big objects more consistent. > > Thanks for making the code much cleaner! And for the whole series, > > Reviewed-by: Feng Tang <feng.tang@xxxxxxxxx> Thanks! Applied. > >> - change calc_slab_order() parameter min_objects to min_order >> >> Also make MAX_OBJS_PER_PAGE check more robust by considering also >> min_objects in addition to slub_min_order. Otherwise this is not a >> functional change. >> >> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx> >> --- >> mm/slub.c | 19 +++++++++---------- >> 1 file changed, 9 insertions(+), 10 deletions(-) >> >> diff --git a/mm/slub.c b/mm/slub.c >> index f04eb029d85a..1c91f72c7239 100644 >> --- a/mm/slub.c >> +++ b/mm/slub.c >> @@ -4110,17 +4110,12 @@ static unsigned int slub_min_objects; >> * the smallest order which will fit the object.
>> */ >> static inline unsigned int calc_slab_order(unsigned int size, >> - unsigned int min_objects, unsigned int max_order, >> + unsigned int min_order, unsigned int max_order, >> unsigned int fract_leftover) >> { >> - unsigned int min_order = slub_min_order; >> unsigned int order; >> >> - if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) >> - return get_order(size * MAX_OBJS_PER_PAGE) - 1; >> - >> - for (order = max(min_order, (unsigned int)get_order(min_objects * size)); >> - order <= max_order; order++) { >> + for (order = min_order; order <= max_order; order++) { >> >> unsigned int slab_size = (unsigned int)PAGE_SIZE << order; >> unsigned int rem; >> @@ -4139,7 +4134,7 @@ static inline int calculate_order(unsigned int size) >> unsigned int order; >> unsigned int min_objects; >> unsigned int max_objects; >> - unsigned int nr_cpus; >> + unsigned int min_order; >> >> min_objects = slub_min_objects; >> if (!min_objects) { >> @@ -4152,7 +4147,7 @@ static inline int calculate_order(unsigned int size) >> * order on systems that appear larger than they are, and too >> * low order on systems that appear smaller than they are. >> */ >> - nr_cpus = num_present_cpus(); >> + unsigned int nr_cpus = num_present_cpus(); >> if (nr_cpus <= 1) >> nr_cpus = nr_cpu_ids; >> min_objects = 4 * (fls(nr_cpus) + 1); >> @@ -4160,6 +4155,10 @@ static inline int calculate_order(unsigned int size) >> max_objects = order_objects(slub_max_order, size); >> min_objects = min(min_objects, max_objects); >> >> + min_order = max(slub_min_order, (unsigned int)get_order(min_objects * size)); >> + if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) >> + return get_order(size * MAX_OBJS_PER_PAGE) - 1; >> + >> /* >> * Attempt to find best configuration for a slab.
This works by first >> * attempting to generate a layout with the best possible configuration and >> @@ -4176,7 +4175,7 @@ static inline int calculate_order(unsigned int size) >> * long as at least single object fits within slub_max_order. >> */ >> for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { >> - order = calc_slab_order(size, min_objects, slub_max_order, >> + order = calc_slab_order(size, min_order, slub_max_order, >> fraction); >> if (order <= slub_max_order) >> return order; >> -- >> 2.42.0 >> >>