Add the on_each_cpu_cond() function that wraps on_each_cpu_mask() and
calculates the cpumask of CPUs to IPI by calling a function supplied as
a parameter to determine whether to IPI each specific CPU.

The function works around allocation failure of the cpumask variable in
the CONFIG_CPUMASK_OFFSTACK=y case by iterating over the CPUs and
sending the IPIs one by one via smp_call_function_single().

The function is useful since it allows separating the request-specific
code that decides whether to IPI a specific CPU from the common
boilerplate code of creating the mask, handling allocation failures,
etc.

Signed-off-by: Gilad Ben-Yossef <gilad@xxxxxxxxxxxxx>
CC: Chris Metcalf <cmetcalf@xxxxxxxxxx>
CC: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>
CC: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
CC: Frederic Weisbecker <fweisbec@xxxxxxxxx>
CC: Russell King <linux@xxxxxxxxxxxxxxxx>
CC: linux-mm@xxxxxxxxx
CC: Pekka Enberg <penberg@xxxxxxxxxx>
CC: Matt Mackall <mpm@xxxxxxxxxxx>
CC: Sasha Levin <levinsasha928@xxxxxxxxx>
CC: Rik van Riel <riel@xxxxxxxxxx>
CC: Andi Kleen <andi@xxxxxxxxxxxxxx>
CC: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
CC: linux-fsdevel@xxxxxxxxxxxxxxx
CC: Avi Kivity <avi@xxxxxxxxxx>
CC: Michal Nazarewicz <mina86@xxxxxxxxxx>
CC: Kosaki Motohiro <kosaki.motohiro@xxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/smp.h |   16 ++++++++++++++++
 kernel/smp.c        |   38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 0 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index a3a14d9..a37f388 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,6 +109,14 @@ void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
 		void *info, bool wait);
 
 /*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(int (*cond_func)(int cpu, void *info),
+		void (*func)(void *), void *info, bool wait);
+
+/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -153,6 +161,14 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 			local_irq_enable();		\
 		}					\
 	} while (0)
+#define on_each_cpu_cond(cond_func, func, info, wait)	\
+	do {						\
+		if (cond_func(0, info)) {		\
+			local_irq_disable();		\
+			(func)(info);			\
+			local_irq_enable();		\
+		}					\
+	} while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
diff --git a/kernel/smp.c b/kernel/smp.c
index 7c0cbd7..bd8f4ad 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -721,3 +721,41 @@ void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
 	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor, optionally waiting for all the required CPUs to finish.
+ * All the limitations specified in smp_call_function_many() apply.
+ */
+void on_each_cpu_cond(int (*cond_func)(int cpu, void *info),
+		void (*func)(void *), void *info, bool wait)
+{
+	cpumask_var_t cpus;
+	int cpu;
+
+	if (likely(zalloc_cpumask_var(&cpus, GFP_ATOMIC))) {
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info))
+				cpumask_set_cpu(cpu, cpus);
+		on_each_cpu_mask(cpus, func, info, wait);
+		free_cpumask_var(cpus);
+	} else {
+		/*
+		 * No free cpumask, bother. No matter, we'll
+		 * just have to IPI them one by one.
+		 */
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info))
+				/*
+				 * This call can fail if we ask it to IPI
+				 * an offline CPU, but this can be a valid
+				 * scenario here. Also, on_each_cpu_mask()
+				 * ignores offline CPUs, so we ignore the
+				 * return value here.
+				 */
+				smp_call_function_single(cpu, func, info, wait);
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
+
-- 
1.7.0.4
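For illustration, a minimal usage sketch of the new API (not part of
this patch): the names pending_work, flush_cond, flush_local and
flush_all_pending below are hypothetical; only the on_each_cpu_cond()
signature added above is assumed. A caller that wants to IPI only the
CPUs that actually have queued work could look like this:

	#include <linux/smp.h>
	#include <linux/percpu.h>

	/* Per-CPU count of queued entries (hypothetical example state). */
	static DEFINE_PER_CPU(unsigned int, pending_work);

	/* Predicate: called once per online CPU, in the caller's context. */
	static int flush_cond(int cpu, void *info)
	{
		return per_cpu(pending_work, cpu) != 0;
	}

	/* IPI handler: runs on each selected CPU with interrupts disabled. */
	static void flush_local(void *info)
	{
		/* ... drain the local queue here ... */
		__this_cpu_write(pending_work, 0);
	}

	static void flush_all_pending(void)
	{
		/* wait = true: return only after all selected CPUs have run. */
		on_each_cpu_cond(flush_cond, flush_local, NULL, true);
	}

As with smp_call_function_many(), this must not be called from hardirq
context or with interrupts disabled, since the caller may need to wait
for the other CPUs to respond to the IPI.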