Upcoming patches will need to check whether a CPU needs to drain its
LRU pagevecs in multiple locations. So move the check into its own
function.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@xxxxxxxxxx>
---
 mm/swap.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 04b678342c02..e7f9e4018ccf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -726,6 +726,17 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 	lru_add_drain();
 }
 
+static bool lru_cpu_needs_drain(int cpu)
+{
+	return pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
+		pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
+		pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
+		pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
+		need_activate_page_drain(cpu) ||
+		has_bh_in_lru(cpu, NULL);
+}
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -808,14 +819,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (force_all_cpus ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
-		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-		    need_activate_page_drain(cpu) ||
-		    has_bh_in_lru(cpu, NULL)) {
+		if (force_all_cpus || lru_cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			__cpumask_set_cpu(cpu, &has_work);
-- 
2.31.1
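
The commit message says upcoming patches will call this check from more
places. A minimal sketch of what such a caller could look like, assuming
the helper above; drain_cpu_if_needed() is a hypothetical name, not part
of this series, and the body only combines symbols already visible in
the diff:

	/*
	 * Hypothetical caller (illustration only, not from this series):
	 * schedule drain work on one CPU, but skip it when that CPU's
	 * LRU pagevecs are already empty.
	 */
	static void drain_cpu_if_needed(int cpu)
	{
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (!lru_cpu_needs_drain(cpu))
			return;

		INIT_WORK(work, lru_add_drain_per_cpu);
		queue_work_on(cpu, mm_percpu_wq, work);
	}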