Introduce schedule_on_each_cpu_mask function to schedule a work item on each online CPU which is included in the mask provided. Then re-implement schedule_on_each_cpu on top of the new function. This function should be preferred to schedule_on_each_cpu in any case where some of the CPUs, especially on a big multi-core system, might not have actual work to perform in order to save needless wakeups and schedules. Signed-off-by: Gilad Ben-Yossef <gilad@xxxxxxxxxxxxx> CC: Thomas Gleixner <tglx@xxxxxxxxxxxxx> CC: Tejun Heo <tj@xxxxxxxxxx> CC: John Stultz <johnstul@xxxxxxxxxx> CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> CC: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx> CC: Mel Gorman <mel@xxxxxxxxx> CC: Mike Frysinger <vapier@xxxxxxxxxx> CC: David Rientjes <rientjes@xxxxxxxxxx> CC: Hugh Dickins <hughd@xxxxxxxxxx> CC: Minchan Kim <minchan.kim@xxxxxxxxx> CC: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxx> CC: Christoph Lameter <cl@xxxxxxxxx> CC: Chris Metcalf <cmetcalf@xxxxxxxxxx> CC: Hakan Akkan <hakanakkan@xxxxxxxxx> CC: Max Krasnyansky <maxk@xxxxxxxxxxxx> CC: Frederic Weisbecker <fweisbec@xxxxxxxxx> CC: linux-kernel@xxxxxxxxxxxxxxx CC: linux-mm@xxxxxxxxx --- include/linux/workqueue.h | 2 ++ kernel/workqueue.c | 36 ++++++++++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index af15545..20da95a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -383,6 +383,8 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay) extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); extern int schedule_on_each_cpu(work_func_t func); +extern int schedule_on_each_cpu_mask(work_func_t func, + const struct cpumask *mask); extern int keventd_up(void); int execute_in_process_context(work_func_t fn, struct execute_work *); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5abf42f..1c9782b 100644 --- 
a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2787,43 +2787,63 @@ int schedule_delayed_work_on(int cpu, EXPORT_SYMBOL(schedule_delayed_work_on); /** - * schedule_on_each_cpu - execute a function synchronously on each online CPU + * schedule_on_each_cpu_mask - execute a function synchronously on each + * online CPU which is specified in the supplied cpumask * @func: the function to call + * @mask: the cpu mask * - * schedule_on_each_cpu() executes @func on each online CPU using the - * system workqueue and blocks until all CPUs have completed. - * schedule_on_each_cpu() is very slow. + * schedule_on_each_cpu_mask() executes @func on each online CPU which + * is part of the @mask using the system workqueue and blocks until + * all CPUs have completed. + * schedule_on_each_cpu_mask() is very slow. * * RETURNS: * 0 on success, -errno on failure. */ -int schedule_on_each_cpu(work_func_t func) +int schedule_on_each_cpu_mask(work_func_t func, const struct cpumask *mask) { int cpu; struct work_struct __percpu *works; works = alloc_percpu(struct work_struct); - if (!works) + if (unlikely(!works)) return -ENOMEM; get_online_cpus(); - for_each_online_cpu(cpu) { + for_each_cpu_and(cpu, mask, cpu_online_mask) { struct work_struct *work = per_cpu_ptr(works, cpu); INIT_WORK(work, func); schedule_work_on(cpu, work); } - for_each_online_cpu(cpu) + for_each_cpu_and(cpu, mask, cpu_online_mask) flush_work(per_cpu_ptr(works, cpu)); put_online_cpus(); free_percpu(works); + return 0; } /** + * schedule_on_each_cpu - execute a function synchronously on each online CPU + * @func: the function to call + * + * schedule_on_each_cpu() executes @func on each online CPU using the + * system workqueue and blocks until all CPUs have completed. + * schedule_on_each_cpu() is very slow. + * + * RETURNS: + * 0 on success, -errno on failure. 
+ */ +int schedule_on_each_cpu(work_func_t func) +{ + return schedule_on_each_cpu_mask(func, cpu_online_mask); +} + +/** * flush_scheduled_work - ensure that any scheduled work has run to completion. * * Forces execution of the kernel-global workqueue and blocks until its -- 1.7.0.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>