We currently call pageset_set_high_and_batch() for each possible cpu,
which repeats the same calculation of the high and batch values.
Instead, call it once per zone and have it apply the calculated values
to all per-cpu pagesets of the zone.

This also allows removing the zone_pageset_init() and
__zone_pcp_update() wrappers.

No functional change.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/page_alloc.c | 40 +++++++++++++++++-----------------------
 1 file changed, 17 insertions(+), 23 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0b516208afda..f669a251f654 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6236,12 +6236,13 @@ static void setup_pageset(struct per_cpu_pageset *p)
 	pageset_update(&p->pcp, 0, 1);
 }
 
-static void pageset_set_high_and_batch(struct zone *zone,
-				struct per_cpu_pageset *p)
+static void zone_set_pageset_high_and_batch(struct zone *zone)
 {
 	unsigned long new_high;
 	unsigned long new_batch;
 	int fraction = READ_ONCE(percpu_pagelist_fraction);
+	int cpu;
+	struct per_cpu_pageset *p;
 
 	if (fraction) {
 		new_high = zone_managed_pages(zone) / fraction;
@@ -6253,23 +6254,25 @@ static void pageset_set_high_and_batch(struct zone *zone,
 		new_high = 6 * new_batch;
 		new_batch = max(1UL, 1 * new_batch);
 	}
-	pageset_update(&p->pcp, new_high, new_batch);
-}
-
-static void __meminit zone_pageset_init(struct zone *zone, int cpu)
-{
-	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
 
-	pageset_init(pcp);
-	pageset_set_high_and_batch(zone, pcp);
+	for_each_possible_cpu(cpu) {
+		p = per_cpu_ptr(zone->pageset, cpu);
+		pageset_update(&p->pcp, new_high, new_batch);
+	}
 }
 
 void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
+	struct per_cpu_pageset *p;
+
 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
-	for_each_possible_cpu(cpu)
-		zone_pageset_init(zone, cpu);
+	for_each_possible_cpu(cpu) {
+		p = per_cpu_ptr(zone->pageset, cpu);
+		pageset_init(p);
+	}
+
+	zone_set_pageset_high_and_batch(zone);
 }
 
 /*
@@ -8002,15 +8005,6 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
-static void __zone_pcp_update(struct zone *zone)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		pageset_set_high_and_batch(zone,
-				per_cpu_ptr(zone->pageset, cpu));
-}
-
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
@@ -8043,7 +8037,7 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
 		goto out;
 
 	for_each_populated_zone(zone)
-		__zone_pcp_update(zone);
+		zone_set_pageset_high_and_batch(zone);
 out:
 	mutex_unlock(&pcp_batch_high_lock);
 	return ret;
@@ -8659,7 +8653,7 @@ EXPORT_SYMBOL(free_contig_range);
 void __meminit zone_pcp_update(struct zone *zone)
 {
 	mutex_lock(&pcp_batch_high_lock);
-	__zone_pcp_update(zone);
+	zone_set_pageset_high_and_batch(zone);
 	mutex_unlock(&pcp_batch_high_lock);
 }
 
-- 
2.28.0
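
For readers who want the shape of the change without the kernel context,
here is a minimal, self-contained userspace sketch of the pattern this
patch adopts. The types and helpers (zone, pcp, zone_batchsize_model(),
NR_MODEL_CPUS) are hypothetical stand-ins for the kernel's struct zone,
struct per_cpu_pageset, zone_batchsize() and the possible-cpu mask, not
the real API; the point is only that the high/batch arithmetic runs once
per zone while the cheap per-pageset store runs once per cpu.

/*
 * Toy model (not kernel code): compute the high/batch pair once per
 * zone, then apply it to every per-cpu pageset, instead of redoing
 * the same arithmetic for each cpu as the old code did.
 */
#include <stdio.h>

#define NR_MODEL_CPUS 4

struct pcp {
	unsigned long high;
	unsigned long batch;
};

struct zone {
	unsigned long managed_pages;
	struct pcp pageset[NR_MODEL_CPUS];	/* stand-in for per-cpu data */
};

/* Rough stand-in for zone_batchsize(): a small fraction of zone size. */
static unsigned long zone_batchsize_model(struct zone *zone)
{
	unsigned long batch = zone->managed_pages / 1024;

	return batch ? batch : 1;
}

static void zone_set_high_and_batch_model(struct zone *zone, int fraction)
{
	unsigned long new_high, new_batch;
	int cpu;

	/* The calculation happens exactly once per zone... */
	if (fraction) {
		new_high = zone->managed_pages / fraction;
		new_batch = new_high / 4;
	} else {
		new_batch = zone_batchsize_model(zone);
		new_high = 6 * new_batch;
	}

	/* ...and only the cheap stores are repeated per cpu. */
	for (cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		zone->pageset[cpu].high = new_high;
		zone->pageset[cpu].batch = new_batch;
	}
}

int main(void)
{
	struct zone z = { .managed_pages = 262144 };

	zone_set_high_and_batch_model(&z, 0);
	printf("high=%lu batch=%lu\n", z.pageset[0].high, z.pageset[0].batch);
	return 0;
}

The kernel version differs in the details (per-cpu allocation, the
ordering guarantees inside pageset_update(), locking via
pcp_batch_high_lock), but the control flow after this patch has the
same shape.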