Updated version of yesterday's core ARM hibernation code bits. I've found that the use of cpu_switch_mm() breaks resume on OMAP3, so I'm leaving it out for now. It's unnecessary for direct resume (via the kernel command line or TuxOnIce) and only needed for uswsusp.
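For reference, the C-level equivalent of the call I've dropped (it also appears in the TODO comment inside swsusp_arch_resume below) would look roughly like this. This is a sketch only; resume_switch_to_swapper() is a made-up name, and the headers listed are my assumption about where the declarations live:

	/*
	 * Hypothetical sketch: switch the live page directory over to
	 * swapper_pg_dir so that a userspace-driven resume (uswsusp) runs on
	 * the kernel master tables.  This is the call that currently breaks
	 * resume on OMAP3, hence it is left out of the patch; the call form
	 * is copied from the TODO comment in swsusp_arch_resume.
	 */
	#include <linux/sched.h>	/* current, active_mm */
	#include <asm/memory.h>		/* __pa() */
	#include <asm/pgtable.h>	/* swapper_pg_dir */
	#include <asm/proc-fns.h>	/* cpu_switch_mm() */

	static void resume_switch_to_swapper(void)
	{
		cpu_switch_mm(__pa(swapper_pg_dir), current->active_mm);
	}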
Also, my bad, apologies for posting the refactored code one revision too early ... I've tried many ways of making the actual swsusp_arch_suspend/resume entry points C code (with only the absolute essentials in assembly), but for swsusp_arch_suspend, assembly entry is mandatory: the framework relies on resuming into the caller, i.e. no change can be made to the return address / LR register before it is stored, which makes a C wrapper function impossible.
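For the archives, this is the shape of the C wrapper that cannot work. Purely hypothetical sketch; do_suspend_asm() is a made-up helper name, not part of this patch:

	/*
	 * HYPOTHETICAL and non-working: a C entry point that delegates the
	 * context save to an assembly helper.
	 */
	extern int do_suspend_asm(void);	/* imaginary asm context-save helper */

	int swsusp_arch_suspend(void)
	{
		/*
		 * The compiler is free to spill LR in this wrapper's
		 * prologue, and a non-tail call ('bl do_suspend_asm')
		 * definitely replaces LR with a return address inside the
		 * wrapper.  Either way, by the time the helper stores the
		 * context, LR no longer holds the original caller's return
		 * address, so the restored image would not resume into
		 * swsusp_arch_suspend()'s caller as the framework requires.
		 */
		return do_suspend_asm();
	}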
Thanks for any thoughts, feedback, or reviews on this.

FrankH.
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6b6786c..b3c271f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -195,6 +195,14 @@ config VECTORS_BASE
 	help
 	  The base address of exception vectors.
 
+config ARCH_HIBERNATION_POSSIBLE
+	bool
+	depends on !SMP
+	help
+	  If the machine architecture supports suspend-to-disk
+	  it should select this automatically for you.
+	  Otherwise, say 'Y' at your own peril.
+
 config ARCH_HAS_CPU_IDLE_WAIT
 	def_bool y
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82..23e93a6 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -191,6 +191,7 @@ static inline void *phys_to_virt(unsigned long x)
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
+#define __pa_symbol(x)		__pa(RELOC_HIDE((unsigned long)(x),0))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 /*
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
index 0000000..8857c79
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#endif	/* __ASM_ARM_SUSPEND_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index c9b00bb..541ac3a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
 obj-$(CONFIG_HAVE_TCM)		+= tcm.o
+obj-$(CONFIG_HIBERNATION)	+= cpu.o swsusp.o
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
 
diff --git a/arch/arm/kernel/cpu.c b/arch/arm/kernel/cpu.c
new file mode 100644
index 0000000..07d2f45
--- /dev/null
+++ b/arch/arm/kernel/cpu.c
@@ -0,0 +1,36 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * https://lkml.org/lkml/2010/6/18/4
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@xxxxxxxxx>
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * via linux-omap mailing list, Teerth Reddy et al.
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+
+/* References to section boundaries */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
diff --git a/arch/arm/kernel/swsusp.S b/arch/arm/kernel/swsusp.S
new file mode 100644
index 0000000..b3ed115
--- /dev/null
+++ b/arch/arm/kernel/swsusp.S
@@ -0,0 +1,167 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * https://lkml.org/lkml/2010/6/18/4
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@xxxxxxxxx>
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * via linux-omap mailing list, Teerth Reddy et al.
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+
+/*
+ * Force ARM mode because:
+ *	- we use PC-relative addressing with >8bit offsets
+ *	- we use msr with immediates
+ */
+.arm
+
+.align	PAGE_SHIFT
+.Lswsusp_page_start:
+
+/*
+ * Save the current CPU state before suspend / poweroff.
+ */
+ENTRY(swsusp_arch_suspend)
+	adr	r0, ctx
+	mrs	r1, cpsr
+	stm	r0!, {r1}		/* current CPSR */
+	msr	cpsr_c, #SYSTEM_MODE
+	stm	r0!, {r0-r14}		/* user regs */
+	msr	cpsr_c, #SVC_MODE
+	mrs	r2, spsr
+	stm	r0!, {r2, sp, lr}	/* SVC SPSR, SVC regs */
+	msr	cpsr, r1		/* restore original mode */
+
+	stmfd	sp!, {lr}
+	bl	__save_processor_state
+	ldmfd	sp!, {lr}
+	b	swsusp_save		/* this will also set the return code */
+ENDPROC(swsusp_arch_suspend)
+
+
+/*
+ * Restore the memory image from the pagelists, and load the CPU registers
+ * from saved state.
+ * This runs in a very restrictive context - namely, no stack can be used
+ * before the CPU register state saved by swsusp_arch_suspend() has been
+ * restored.
+ */
+ENTRY(swsusp_arch_resume)
+	/*
+	 * TODO: Ubuntu mach-dove suspend-to-disk code sets pagedir to swapper
+	 * (so that resume via initramfs can work). The code is equivalent to:
+	 *
+	 *	cpu_switch_mm(__pa(swapper_pg_dir), current->active_mm);
+	 *
+	 * It's not directly callable from asm (active_mm is a macro in a
+	 * nonexported header, proc-fns.h, while cpu_switch_mm is a macro
+	 * for C use only).
+	 * To enable this and prevent direct CPU-dependent manipulation of
+	 * MMU registers, header file changes are required.
+	 *
+	 * FIXME: Supplying the code breaks resume on OMAP3. For now, don't.
+	 */
+#ifdef NOTYET
+	act_mm	r1
+	ldr	r0, =__virt_to_phys(swapper_pg_dir)
+	cpu_switch_mm
+#endif
+
+	/*
+	 * The following code is an assembly version of:
+	 *
+	 *	struct pbe *pbe;
+	 *	for (pbe = restore_pblist; pbe != NULL; pbe = pbe->next)
+	 *		copy_page(pbe->orig_address, pbe->address);
+	 *
+	 * Because this is the very place where data pages, including our stack,
+	 * are overwritten, function calls are obviously impossible. Hence asm.
+	 *
+	 * The core of the loop is taken almost verbatim from copy_page.S.
+	 */
+	ldr	r1, =(restore_pblist - 8)	/* "fake" pbe->next */
+	b	3f
+.ltorg
+.align L1_CACHE_SHIFT
+0:
+PLD(	pld	[r0, #0]	)
+PLD(	pld	[r0, #L1_CACHE_BYTES]	)
+	mov	r3, #(PAGE_SIZE / (2 * L1_CACHE_BYTES) PLD( -1 ))
+	ldmia	r0!, {r4-r7}
+1:
+PLD(	pld	[r0, #(2 * L1_CACHE_BYTES)]	)
+PLD(	pld	[r0, #(3 * L1_CACHE_BYTES)]	)
+2:
+.rept	(2 * L1_CACHE_BYTES / 16 - 1)
+	stmia	r2!, {r4-r7}
+	ldmia	r0!, {r4-r7}
+.endr
+	subs	r3, r3, #1
+	stmia	r2!, {r4-r7}
+	ldmgtia	r0!, {r4-r7}
+	bgt	1b
+PLD(	ldmeqia	r0!, {r4-r7}	)
+PLD(	beq	2b	)
+3:
+	ldr	r1, [r1, #8]	/* load next in list (pbe->next) */
+	cmp	r1, #0
+	ldrne	r0, [r1]	/* src page start address (pbe->address) */
+	ldrne	r2, [r1, #4]	/* dst page start address (pbe->orig_address) */
+	bne	0b
+
+	/*
+	 * Done - now restore the CPU state and return.
+	 */
+	msr	cpsr_c, #SYSTEM_MODE
+	adr	r0, ctx
+	ldm	r0!, {r1, sp, lr}	/* first word is CPSR, following are r0/r1 (irrelevant) */
+	msr	cpsr_cxsf, r1
+	ldm	r0!, {r2-r14}
+	msr	cpsr_c, #SVC_MODE
+	ldm	r0!, {r2, sp, lr}
+	msr	spsr_cxsf, r2
+	msr	cpsr_c, r1		/* use CPSR from above */
+
+	/*
+	 * From here on we have a valid stack again. Core state is
+	 * not restored yet, redirect to the machine-specific
+	 * implementation to get that done.
+	 * Note that at this point we have succeeded with restore;
+	 * if machine-specific code fails it'd need to panic, there
+	 * is no way anymore now to recover from "resume failure".
+	 */
+	mov	r1, #0
+	stmfd	sp!, {r1,lr}
+	bl	__restore_processor_state	/* restore core state */
+	ldmfd	sp!, {r0,pc}
+ENDPROC(swsusp_arch_resume)
+
+.ltorg
+
+/*
+ * Save the CPU context (register set for all modes and mach-specific cp regs)
+ * here. Setting aside what remains of this CPU page, should be aplenty.
+ */
+.align	L1_CACHE_SHIFT
+ENTRY(ctx)
+.space	(PAGE_SIZE - (. - .Lswsusp_page_start))
+END(ctx)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4957e13..e691c77 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -153,7 +153,6 @@ SECTIONS
 		__init_end = .;
 #endif
 
-		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(32)
 
 		/*
@@ -176,6 +175,8 @@ SECTIONS
 	}
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+	NOSAVE_DATA
+
 #ifdef CONFIG_HAVE_TCM
 	/*
 	 * We align everything to a page boundary so we can
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b6e818f..0d39ae0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -171,7 +171,7 @@
 #define NOSAVE_DATA							\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_begin) = .;				\
-	*(.data.nosave)							\
+	.data.nosave : { *(.data.nosave) }				\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_end) = .;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5e781d8..476d4c3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -274,8 +274,13 @@ static inline void hibernate_nvs_restore(void) {}
 #endif /* CONFIG_HIBERNATION_NVS */
 
 #ifdef CONFIG_PM_SLEEP
+#ifndef CONFIG_ARM
 void save_processor_state(void);
 void restore_processor_state(void);
+#else
+#define save_processor_state	preempt_disable
+#define restore_processor_state	preempt_enable
+#endif
 /* kernel/power/main.c */
 extern int register_pm_notifier(struct notifier_block *nb);
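One usage note on the final suspend.h hunk: mapping save_processor_state() / restore_processor_state() to preempt_disable() / preempt_enable() works because swsusp_arch_suspend() above performs the actual coprocessor-state save itself, via __save_processor_state(). A rough model of the generic snapshot sequence, simplified from my reading of kernel/power/hibernate.c (snapshot_cpu() is a made-up name, and the extern declaration is an assumption; the real one lives in kernel/power/power.h):

	#include <linux/suspend.h>

	extern int swsusp_arch_suspend(void);	/* see kernel/power/power.h */

	static int snapshot_cpu(void)
	{
		int error;

		save_processor_state();		/* on ARM: just preempt_disable() */
		error = swsusp_arch_suspend();	/* stores the register file in
						   'ctx', calls
						   __save_processor_state(), then
						   branches to swsusp_save() */
		restore_processor_state();	/* on ARM: just preempt_enable() */
		return error;
	}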