This is a note to let you know that I've just added the patch titled

    arm64: kernel: rename __cpu_suspend to keep it aligned with arm

to the 4.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     arm64-kernel-rename-__cpu_suspend-to-keep-it-aligned-with-arm.patch
and it can be found in the queue-4.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From af391b15f7b56ce19f52862d36595637dd42b575 Mon Sep 17 00:00:00 2001
From: Sudeep Holla <sudeep.holla@xxxxxxx>
Date: Thu, 18 Jun 2015 15:41:32 +0100
Subject: arm64: kernel: rename __cpu_suspend to keep it aligned with arm

From: Sudeep Holla <sudeep.holla@xxxxxxx>

commit af391b15f7b56ce19f52862d36595637dd42b575 upstream.

This patch renames __cpu_suspend to cpu_suspend so that it's aligned
with ARM32. It also removes the redundant wrapper created.

This is in preparation to implement generic PSCI system suspend using
the cpu_{suspend,resume} which now has the same interface on both ARM
and ARM64.

Cc: Mark Rutland <mark.rutland@xxxxxxx>
Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@xxxxxxx>
Reviewed-by: Ashwin Chaugule <ashwin.chaugule@xxxxxxxxxx>
Signed-off-by: Sudeep Holla <sudeep.holla@xxxxxxx>
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 arch/arm64/include/asm/cpuidle.h |    8 ++------
 arch/arm64/include/asm/suspend.h |    2 +-
 arch/arm64/kernel/cpuidle.c      |    4 ++--
 arch/arm64/kernel/psci.c         |    2 +-
 arch/arm64/kernel/suspend.c      |    6 +++---
 5 files changed, 9 insertions(+), 13 deletions(-)

--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -5,20 +5,16 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_init(unsigned int cpu);
-extern int cpu_suspend(unsigned long arg);
+extern int arm_cpuidle_suspend(int index);
 #else
 static inline int arm_cpuidle_init(unsigned int cpu)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int cpu_suspend(unsigned long arg)
+static inline int arm_cpuidle_suspend(int index)
 {
 	return -EOPNOTSUPP;
 }
 #endif
 
-static inline int arm_cpuidle_suspend(int index)
-{
-	return cpu_suspend(index);
-}
 #endif
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,6 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };
 
-extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
 #endif
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -37,7 +37,7 @@ int arm_cpuidle_init(unsigned int cpu)
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
  * operations back-end error code otherwise.
  */
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();
 
@@ -47,5 +47,5 @@ int cpu_suspend(unsigned long arg)
 	 */
 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
 		return -EOPNOTSUPP;
-	return cpu_ops[cpu]->cpu_suspend(arg);
+	return cpu_ops[cpu]->cpu_suspend(index);
 }
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -546,7 +546,7 @@ static int __maybe_unused cpu_psci_cpu_s
 	if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
 		ret = psci_ops.cpu_suspend(state[index - 1], 0);
 	else
-		ret = __cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(index, psci_suspend_finisher);
 
 	return ret;
 }
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer
 }
 
 /*
- * __cpu_suspend
+ * cpu_suspend
  *
  * arg: argument to pass to the finisher function
  * fn: finisher function pointer
  *
  */
-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;
 	int ret;
@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int
 		 * We are resuming from reset with TTBR0_EL1 set to the
 		 * idmap to enable the MMU; restore the active_mm mappings in
 		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
+		 * the thread entered cpu_suspend with TTBR0_EL1 set to
 		 * reserved TTBR0 page tables and should be restored as such.
 		 */
 		if (mm == &init_mm)


Patches currently in stable-queue which might be from sudeep.holla@xxxxxxx are

queue-4.1/arm64-kernel-rename-__cpu_suspend-to-keep-it-aligned-with-arm.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
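
For readers skimming the interface change: below is a minimal sketch of how a
caller would use the renamed entry point after this patch, modelled on the
cpu_psci_cpu_suspend() hunk above. The example_* names and the finisher body
are illustrative placeholders and not part of the patch.

#include <linux/errno.h>
#include <asm/suspend.h>

/*
 * Hypothetical finisher: it runs on the suspending CPU once the context
 * has been saved and is expected to power the CPU down; if it returns,
 * cpu_suspend() treats the attempt as aborted and propagates the value.
 */
static int example_suspend_finisher(unsigned long index)
{
	/* a real back-end would hand "index" to firmware here */
	return -EOPNOTSUPP;
}

static int example_enter_idle(int index)
{
	/* post-patch spelling: argument first, finisher second, as on ARM32 */
	return cpu_suspend(index, example_suspend_finisher);
}

Note that the cpuidle path does not use this two-argument form directly: as
the cpuidle.c hunk shows, arm_cpuidle_suspend() dispatches to
cpu_ops[cpu]->cpu_suspend(index), and it is the back-end (e.g. the psci.c
hunk) that ends up calling cpu_suspend() with a finisher.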