On Mon, 23 May 2011, Russell King - ARM Linux wrote:
> On Mon, May 23, 2011 at 02:37:19PM +0100, Frank Hofmann wrote:
> > What I've found necessary to save/restore via swsusp_arch_suspend/resume
> > are the SYSTEM_MODE and SVC_MODE registers.
> > Yesterday, I had thought that cpu_init() resets SVC_MODE sufficiently, but
> > that doesn't seem to be the case; if I leave that out, resume-from-disk
> > doesn't work anymore.
>
> You will be running in SVC mode, so the SVC mode registers are your
> current register set. At some point you need to do an effective
> "context switch" between the kernel doing the resume and the kernel
> which was running. That involves restoring the saved register state.
>
> System mode on the other hand is unused by the kernel.
Ah, and I had it the other way round ... that's why. Thanks!

I've tried that, saving/restoring just CPSR/SPSR and the register set, and
that seems sufficient; it works fine!
All this means that the basic code has again become smaller.
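
Purely for illustration (this struct is not part of the patch; the code
below just fills a page-sized byte array with stmia/ldmia), the saved
context amounts to something like:

struct arm_swsusp_ctx_sketch {		/* hypothetical name, sketch only */
	unsigned long cpsr;		/* current program status register */
	unsigned long spsr;		/* saved program status register */
	unsigned long regs[9];		/* r3-r11, laid down by stmia {r1-r11}
					 * with r1/r2 holding cpsr/spsr */
	unsigned long lr;		/* return address into the suspending kernel */
	unsigned long sp;		/* SVC-mode stack pointer, stored last */
};

That matches the first item in the list below: only what the kernel cannot
reinitialize from scratch gets saved.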
Attached is a new version, integrating all the feedback so far:
* save/restore only those parts of the register set that the kernel cannot
  reinitialize from scratch
* take care of FIQ disable/enable bracketing
* use traditional stmfd/ldmfd instead of push/pop
* don't rely on thread state (current->active_mm); use the global &init_mm instead
* dump arch_prepare_suspend (skipping ahead of Rafael's suggested fix)
* ditch the vmlinux.lds changes as they're not needed
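
To make it easier to see how these pieces fit together, here is a rough,
simplified sketch of how the generic hibernation core ends up driving the
arch hooks added by this patch. The surrounding function is hypothetical;
only the hook names and their relative ordering are meant to mirror the
real code in kernel/power/:

/* arch hooks provided by this patch (cpu.c / swsusp.S) */
extern void save_processor_state(void);		/* flush thread state, disable FIQs */
extern void restore_processor_state(void);	/* flush TLBs, re-enable FIQs */
extern int swsusp_arch_suspend(void);		/* save CPSR/SPSR/regs, call swsusp_save */
extern int swsusp_arch_resume(void);		/* copy image back, reload registers */

/* hypothetical, heavily condensed view of the snapshot step */
static int snapshot_sketch(void)
{
	int error;

	save_processor_state();			/* FIQ bracketing starts here */
	error = swsusp_arch_suspend();		/* returns via swsusp_save, or ... */
	restore_processor_state();		/* ... again once a resumed image comes back */
	return error;
}

The resume side is bracketed the same way: the core calls
save_processor_state(), then swsusp_arch_resume(), and once the register
context is reloaded, execution continues in the restored image right after
its own swsusp_arch_suspend() call; that is why restore_processor_state()
runs on both paths.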
What other outstanding things are there to address for this?
All the best,
FrankH.
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6b6786c..859dd86 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -198,6 +198,9 @@ config VECTORS_BASE
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool n
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82..23e93a6 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -191,6 +191,7 @@ static inline void *phys_to_virt(unsigned long x)
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index c9b00bb..541ac3a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
+obj-$(CONFIG_HIBERNATION) += cpu.o swsusp.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/cpu.c b/arch/arm/kernel/cpu.c
new file mode 100644
index 0000000..0f1c31f
--- /dev/null
+++ b/arch/arm/kernel/cpu.c
@@ -0,0 +1,64 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <asm/tlbflush.h>
+
+extern const void __nosave_begin, __nosave_end;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void save_processor_state(void)
+{
+ flush_thread();
+ local_fiq_disable();
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+ local_fiq_enable();
+}
+
+u8 __swsusp_arch_ctx[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+u8 __swsusp_resume_stk[PAGE_SIZE/2] __nosavedata;
+
+/*
+ * The framework loads the hibernation image into this linked list,
+ * for swsusp_arch_resume() to copy back to the proper destinations.
+ *
+ * To make this work if resume is triggered from initramfs, the
+ * pagetables need to be switched to allow writes to kernel mem.
+ */
+void notrace __swsusp_arch_restore_image(void)
+{
+ extern struct pbe *restore_pblist;
+ struct pbe *pbe;
+
+ cpu_switch_mm(__virt_to_phys(swapper_pg_dir), &init_mm);
+
+ for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ copy_page(pbe->orig_address, pbe->address);
+}
diff --git a/arch/arm/kernel/swsusp.S b/arch/arm/kernel/swsusp.S
new file mode 100644
index 0000000..1fc0e33
--- /dev/null
+++ b/arch/arm/kernel/swsusp.S
@@ -0,0 +1,72 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+/*
+ * Save the current CPU state before suspend / poweroff.
+ */
+ENTRY(swsusp_arch_suspend)
+ ldr r0, =__swsusp_arch_ctx
+ mrs r1, cpsr
+ mrs r2, spsr
+ stmia r0!, {r1-r11,lr} @ CPSR, SPSR, nonvolatile regs
+ str sp, [r0], #4 @ stack
+ stmfd sp!, {lr}
+ bl __save_processor_state @ machine-specific state
+ ldmfd sp!, {lr}
+ b swsusp_save @ let framework write snapshot out
+ENDPROC(swsusp_arch_suspend)
+
+/*
+ * Restore the memory image from the pagelists, and load the CPU registers
+ * from saved state.
+ */
+ENTRY(swsusp_arch_resume)
+ /*
+ * Switch stack to a nosavedata region to make sure image restore
+ * doesn't clobber it underneath itself.
+ */
+ ldr sp, =(__swsusp_resume_stk + PAGE_SIZE / 2)
+ bl __swsusp_arch_restore_image
+
+ /*
+ * Restore the CPU registers.
+ */
+ ldr r0, =__swsusp_arch_ctx
+ ldmia r0!, {r1,r2} @ CPSR / SPSR
+ msr cpsr, r1
+ msr spsr, r2
+ ldr r0, =__swsusp_arch_ctx @ reload in case regset switched
+ ldmia r0!, {r1-r11,lr} @ nonvolatile regs
+ ldr sp, [r0], #4 @ stack
+
+ /*
+ * From here on we have a valid stack again. Core state is
+ * not restored yet, redirect to the machine-specific
+ * implementation to get that done.
+ * Resume has succeeded at this point; if the machine-specific
+ * code wants to fail it needs to panic.
+ */
+ mov r1, #0
+ stmfd sp!, {r1,lr}
+ bl __restore_processor_state @ machine-specific state
+ bl cpu_init @ reinitialize other modes
+ ldmfd sp!, {r0,pc}
+ENDPROC(swsusp_arch_resume)
_______________________________________________
linux-pm mailing list
linux-pm@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linux-foundation.org/mailman/listinfo/linux-pm