Add the on_each_cpu_cond() function that wraps on_each_cpu_mask() and
calculates the cpumask of cpus to IPI by calling a function supplied
as a parameter in order to determine whether to IPI each specific cpu.

The function works around allocation failure of the cpumask variable
in CONFIG_CPUMASK_OFFSTACK=y by iterating over the cpus and sending an
IPI to each one in turn via smp_call_function_single().

The function is useful since it allows separating the specific code
that decides in each case whether to IPI a specific cpu for a specific
request from the common boilerplate code of creating the mask and
handling allocation failures.

Signed-off-by: Gilad Ben-Yossef <gilad@xxxxxxxxxxxxx>
CC: Chris Metcalf <cmetcalf@xxxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>
CC: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
CC: Frederic Weisbecker <fweisbec@xxxxxxxxx>
CC: Russell King <linux@xxxxxxxxxxxxxxxx>
CC: linux-mm@xxxxxxxxx
CC: Pekka Enberg <penberg@xxxxxxxxxx>
CC: Matt Mackall <mpm@xxxxxxxxxxx>
CC: Sasha Levin <levinsasha928@xxxxxxxxx>
CC: Rik van Riel <riel@xxxxxxxxxx>
CC: Andi Kleen <andi@xxxxxxxxxxxxxx>
CC: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
CC: linux-fsdevel@xxxxxxxxxxxxxxx
CC: Avi Kivity <avi@xxxxxxxxxx>
CC: Michal Nazarewicz <mina86@xxxxxxxxxx>
CC: Kosaki Motohiro <kosaki.motohiro@xxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
CC: Milton Miller <miltonm@xxxxxxx>
---
 include/linux/smp.h |   19 ++++++++++++++++
 kernel/smp.c        |   58 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+), 0 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index d0adb78..e1ea702 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,6 +109,15 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 		void *info, bool wait);
 
 /*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfpflags);
+
+/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -153,6 +162,16 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 			local_irq_enable();		\
 		}					\
 	} while (0)
+#define on_each_cpu_cond(cond_func, func, info, wait, gfpflags) \
+	do {						\
+		preempt_disable();			\
+		if (cond_func(0, info)) {		\
+			local_irq_disable();		\
+			(func)(info);			\
+			local_irq_enable();		\
+		}					\
+		preempt_enable();			\
+	} while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
diff --git a/kernel/smp.c b/kernel/smp.c
index a081e6c..fa0912a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -730,3 +730,61 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * on_each_cpu_cond(): Call a function on each processor for which
+ * the supplied function cond_func returns true, optionally waiting
+ * for all the required CPUs to finish. This may include the local
+ * processor.
+ * @cond_func:	A callback function that is passed a cpu id and
+ *		the info parameter. The function is called
+ *		with preemption disabled. The function should
+ *		return a boolean value indicating whether to IPI
+ *		the specified CPU.
+ * @func:	The function to run on all applicable CPUs.
+ *		This must be fast and non-blocking.
+ * @info:	An arbitrary pointer to pass to both functions.
+ * @wait:	If true, wait (atomically) until function has
+ *		completed on other CPUs.
+ * @gfpflags:	GFP flags to use when allocating the cpumask
+ *		used internally by the function.
+ *
+ * The function might sleep if the GFP flags indicate that a
+ * non-atomic allocation is allowed.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+			smp_call_func_t func, void *info, bool wait,
+			gfp_t gfpflags)
+{
+	cpumask_var_t cpus;
+	int cpu, ret;
+
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
+	if (likely(zalloc_cpumask_var(&cpus, (gfpflags|__GFP_NOWARN)))) {
+		preempt_disable();
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info))
+				cpumask_set_cpu(cpu, cpus);
+		on_each_cpu_mask(cpus, func, info, wait);
+		preempt_enable();
+		free_cpumask_var(cpus);
+	} else {
+		/*
+		 * No free cpumask, bother. No matter, we'll
+		 * just have to IPI them one by one.
+		 */
+		preempt_disable();
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info)) {
+				ret = smp_call_function_single(cpu, func,
+							info, wait);
+				WARN_ON_ONCE(ret);
+			}
+		preempt_enable();
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
-- 
1.7.0.4
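
For illustration only (not part of the patch), here is a minimal sketch of
how a caller might use the new API. The per-cpu flag and the function names
(work_pending, cpu_has_work, do_work, kick_busy_cpus) are hypothetical; a
real caller would plug in its own predicate and IPI handler.

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

/* Hypothetical per-cpu flag marking CPUs that have work queued. */
static DEFINE_PER_CPU(bool, work_pending);

/* cond_func: runs with preemption disabled, must not sleep. */
static bool cpu_has_work(int cpu, void *info)
{
	return per_cpu(work_pending, cpu);
}

/* func: runs in IPI context on each CPU the predicate selected. */
static void do_work(void *info)
{
	this_cpu_write(work_pending, false);
}

static void kick_busy_cpus(void)
{
	/*
	 * wait=true: return only after all selected CPUs have run do_work().
	 * GFP_KERNEL: the internal cpumask allocation may sleep, so this
	 * must be called from process context with interrupts enabled.
	 */
	on_each_cpu_cond(cpu_has_work, do_work, NULL, true, GFP_KERNEL);
}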