It is suggested that cpumask_var_t and alloc_cpumask_var() be used instead
of struct cpumask.  But I don't want to add that complexity, nor leave the
unwelcome "static struct cpumask has_work;" in place, so I just remove it
and use flush_work() on the drain work of every online CPU.  flush_work()
returns very quickly for a work item that is initialized but was never
queued, so we don't need the struct cpumask has_work for performance.

CC: akpm@xxxxxxxxxxxxxxxxxxxx
CC: Chris Metcalf <cmetcalf@xxxxxxxxxx>
CC: Mel Gorman <mgorman@xxxxxxx>
CC: Tejun Heo <tj@xxxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxxx>
CC: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Signed-off-by: Lai Jiangshan <laijs@xxxxxxxxxxxxxx>
---
(A sketch of the resulting lru_add_drain_all() is appended after the diff
for reference.)

 mm/swap.c |   11 ++++-------
 1 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 9e8e347..bb524ca 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -833,27 +833,24 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
-	static struct cpumask has_work;
 	int cpu;
 
 	mutex_lock(&lock);
 	get_online_cpus();
-	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
+		INIT_WORK(work, lru_add_drain_per_cpu);
+
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
+		    need_activate_page_drain(cpu))
 			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
 	}
 
-	for_each_cpu(cpu, &has_work)
+	for_each_online_cpu(cpu)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
 
 	put_online_cpus();
-- 
1.7.4.4
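
For reference, here is roughly what lru_add_drain_all() looks like with the
patch applied.  It is reconstructed from the diff and its context above; the
trailing mutex_unlock() and closing brace fall outside the hunk and are
assumed from the matching mutex_lock():

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		/* Always initialize; an unqueued work item is cheap to flush. */
		INIT_WORK(work, lru_add_drain_per_cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu))
			schedule_work_on(cpu, work);
	}

	/* flush_work() returns almost immediately for work that was never queued. */
	for_each_online_cpu(cpu)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}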