x86 has two smp functions, on_cpu() and on_cpu_async(). These are
nice because we can use them to run functions on a cpu more than
once. This patch adds the same API to ARM, but the requirements
placed on target cpus are not the same. While on x86 the interrupt
controller must be enabled enough to send/receive IPIs and the
target cpu must have interrupts enabled (or be expected to enable
them), on ARM we do not use IPIs, so the GIC does not need to be
enabled and cpus need neither register irq handlers nor enable
interrupts. Instead, target cpus are expected to be powered off or
idle. The advantage of the ARM approach is the lack of interrupt
handling dependencies. The disadvantage is the lack of preemption -
even on_cpu_async() must first wait for the target cpu to become
idle.

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 lib/arm/asm/smp.h |  2 ++
 lib/arm/smp.c     | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 55 insertions(+), 5 deletions(-)

diff --git a/lib/arm/asm/smp.h b/lib/arm/asm/smp.h
index a352d76e710d..ca088d654516 100644
--- a/lib/arm/asm/smp.h
+++ b/lib/arm/asm/smp.h
@@ -48,6 +48,8 @@ static inline void set_cpu_idle(int cpu, bool idle)
 typedef void (*secondary_entry_fn)(void);
 
 extern void smp_boot_secondary(int cpu, secondary_entry_fn entry);
+extern void on_cpu_async(int cpu, void (*func)(void *data), void *data);
+extern void on_cpu(int cpu, void (*func)(void *data), void *data);
 extern void smp_run(void (*func)(void));
 
 #endif /* _ASMARM_SMP_H_ */
diff --git a/lib/arm/smp.c b/lib/arm/smp.c
index 3f8457cbf3a1..f3542b8bad99 100644
--- a/lib/arm/smp.c
+++ b/lib/arm/smp.c
@@ -48,14 +48,10 @@ secondary_entry_fn secondary_cinit(void)
 	return entry;
 }
 
-void smp_boot_secondary(int cpu, secondary_entry_fn entry)
+static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
 {
 	int ret;
 
-	spin_lock(&lock);
-
-	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
-
 	secondary_data.stack = thread_stack_alloc();
 	secondary_data.entry = entry;
 	mmu_mark_disabled(cpu);
@@ -64,10 +60,23 @@ void smp_boot_secondary(int cpu, secondary_entry_fn entry)
 
 	while (!cpu_online(cpu))
 		wfe();
+}
 
+void smp_boot_secondary(int cpu, secondary_entry_fn entry)
+{
+	spin_lock(&lock);
+	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
+	__smp_boot_secondary(cpu, entry);
 	spin_unlock(&lock);
 }
 
+typedef void (*on_cpu_func)(void *);
+struct on_cpu_info {
+	on_cpu_func func;
+	void *data;
+};
+static struct on_cpu_info on_cpu_info[NR_CPUS];
+
 void do_idle(void)
 {
 	int cpu = smp_processor_id();
@@ -78,11 +87,50 @@ void do_idle(void)
 	for (;;) {
 		while (cpu_idle(cpu))
 			wfe();
+		smp_rmb();
+		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
+		on_cpu_info[cpu].func = NULL;
+		smp_wmb();
 		set_cpu_idle(cpu, true);
 		sev();
 	}
 }
 
+void on_cpu_async(int cpu, void (*func)(void *data), void *data)
+{
+	if (cpu == smp_processor_id()) {
+		func(data);
+		return;
+	}
+
+	spin_lock(&lock);
+	if (!cpu_online(cpu))
+		__smp_boot_secondary(cpu, do_idle);
+	spin_unlock(&lock);
+
+	for (;;) {
+		while (!cpu_idle(cpu))
+			wfe();
+		spin_lock(&lock);
+		if ((volatile void *)on_cpu_info[cpu].func == NULL)
+			break;
+		spin_unlock(&lock);
+	}
+	on_cpu_info[cpu].func = func;
+	on_cpu_info[cpu].data = data;
+	spin_unlock(&lock);
+	set_cpu_idle(cpu, false);
+	sev();
+}
+
+void on_cpu(int cpu, void (*func)(void *data), void *data)
+{
+	on_cpu_async(cpu, func, data);
+
+	while (!cpu_idle(cpu))
+		wfe();
+}
+
 void smp_run(void (*func)(void))
 {
 	int cpu;
-- 
2.9.4
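
For reference, a rough sketch of how a test might use the new API.
This is not part of the patch and is untested; it assumes the usual
kvm-unit-tests environment (printf from libcflat.h, the declarations
above from asm/smp.h), and inc_counter()/example() are made-up names:

#include <libcflat.h>
#include <asm/smp.h>

static int counter;

static void inc_counter(void *data)
{
	/* runs on the target cpu, from do_idle()'s work loop */
	*(int *)data += 1;
}

static void example(void)
{
	/*
	 * Boots cpu1 into do_idle() if it isn't online yet, waits
	 * for it to become idle, hands off the work, and returns
	 * without waiting for the work to complete.
	 */
	on_cpu_async(1, inc_counter, &counter);

	/*
	 * Hands off a second increment, then waits for cpu1 to go
	 * idle again before returning.
	 */
	on_cpu(1, inc_counter, &counter);

	printf("counter=%d (expected 2)\n", counter);
}

Note that even the async call can block: if cpu1 is still busy with
earlier work, on_cpu_async() spins until it goes idle again, which is
the preemption limitation mentioned above.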