This avoids needing to compute the task pointer in this function,
allowing it to be used as the source of identification in the future.

Signed-off-by: Keith Packard <keithpac@xxxxxxxxxx>
---
 arch/arm/include/asm/smp.h   | 3 ++-
 arch/arm/kernel/head-nommu.S | 1 +
 arch/arm/kernel/head.S       | 1 +
 arch/arm/kernel/smp.c        | 8 +++++---
 4 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 86a7fd721556..d43b64635d77 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -48,7 +48,7 @@ extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
  * Called from platform specific assembly code, this is the
  * secondary CPU entry point.
  */
-asmlinkage void secondary_start_kernel(unsigned int cpu);
+asmlinkage void secondary_start_kernel(unsigned int cpu, struct task_struct *task);
 
 
 /*
@@ -62,6 +62,7 @@ struct secondary_data {
 	unsigned long swapper_pg_dir;
 	void *stack;
 	unsigned int cpu;
+	struct task_struct *task;
 };
 extern struct secondary_data secondary_data;
 extern void secondary_startup(void);
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 5aa8ef42717f..218715c135ed 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -115,6 +115,7 @@ ENTRY(secondary_startup)
 	ret	r12
 1:	bl	__after_proc_init
 	ldr	r0, [r7, #16]			@ set up cpu number
+	ldr	r1, [r7, #20]			@ set up task pointer
 	ldr	sp, [r7, #12]			@ set up the stack pointer
 	mov	fp, #0
 	b	secondary_start_kernel
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 0e541af738e2..4a6cb0b0808b 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -395,6 +395,7 @@ ENDPROC(secondary_startup_arm)
 ENTRY(__secondary_switched)
 	ldr_l	r7, secondary_data + 12		@ get secondary_data.stack
 	ldr_l	r0, secondary_data + 16		@ get secondary_data.cpu
+	ldr_l	r1, secondary_data + 20		@ get secondary_data.task
 	mov	sp, r7
 	mov	fp, #0
 	b	secondary_start_kernel
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 55cb1689a4b3..5e999f1f1aea 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -154,6 +154,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
 #endif
 	secondary_data.cpu = cpu;
+	secondary_data.task = idle;
 	sync_cache_w(&secondary_data);
 
 	/*
@@ -375,13 +376,14 @@ void arch_cpu_idle_dead(void)
 	 * to be repeated to undo the effects of taking the CPU offline.
 	 */
 	__asm__("mov	r0, %1\n"
+	"	mov	r1, %2\n"
 	"	mov	sp, %0\n"
 	"	mov	fp, #0\n"
 	"	b	secondary_start_kernel"
 		:
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8),
-		  "r" (cpu)
-		: "r0");
+		  "r" (cpu), "r" (current)
+		: "r0", "r1");
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -404,7 +406,7 @@ static void smp_store_cpu_info(unsigned int cpuid)
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(unsigned int cpu)
+asmlinkage void secondary_start_kernel(unsigned int cpu, struct task_struct *task)
 {
 	struct mm_struct *mm = &init_mm;
 
-- 
2.33.0
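
For readers without the surrounding code handy, here is a minimal userspace
sketch of the handoff this patch extends. It is an analogy only, under the
assumption that modelling the flow outside the kernel is helpful:
boot_block, secondary_entry() and the pthread "CPU" are illustrative
stand-ins, not kernel interfaces. The pattern it models is the one above:
__cpu_up() publishes the idle task next to the cpu number in secondary_data,
the trampoline in head.S/head-nommu.S loads it into r1, and
secondary_start_kernel() receives it as an argument instead of recomputing it.

/*
 * Illustrative userspace model only -- not kernel code.  The "boot CPU"
 * (main thread) publishes the cpu number and task pointer in a shared
 * block; the "secondary CPU" (a pthread standing in for the asm
 * trampoline) loads both and passes them straight to its C entry point,
 * so the entry point never has to derive the task from the stack.
 */
#include <pthread.h>
#include <stdio.h>

struct task { const char *name; };

/* Models struct secondary_data: parameters the boot CPU publishes. */
struct boot_block {
	unsigned int cpu;
	struct task *task;
};

static struct boot_block secondary_data_model;

/* Models secondary_start_kernel(cpu, task): identity arrives as arguments. */
static void secondary_entry(unsigned int cpu, struct task *task)
{
	printf("cpu%u up, running task %s\n", cpu, task->name);
}

/* Models the trampoline: load the published values, then branch. */
static void *secondary_trampoline(void *arg)
{
	struct boot_block *d = arg;

	secondary_entry(d->cpu, d->task);
	return NULL;
}

int main(void)
{
	static struct task idle = { "swapper/1" };
	pthread_t t;

	/* Models __cpu_up(): publish cpu and idle task before kicking the CPU. */
	secondary_data_model.cpu = 1;
	secondary_data_model.task = &idle;

	pthread_create(&t, NULL, secondary_trampoline, &secondary_data_model);
	pthread_join(t, NULL);
	return 0;
}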