Hi,
time for another round on this one...
This has been cleaned up quite a bit now.
There is no longer any need for a "swsusp context" at all. The code uses
cpu_suspend/resume and keeps the snapshot state on the stack while
writing it out.
There are a few dependencies this patch brings in:
* due to the use of cpu_suspend / cpu_resume, it'll only apply as-is
to kernels no older than f6b0fa02e8b0708d17d631afce456524eadf87ff,
where Russell King introduced the generic interface.
Backporting these interfaces to older kernels takes a little work.
* it temporarily uses swapper_pg_dir and establishes 1:1 mappings there
for an MMU-off transition, which is necessary before resume.
To tear these down afterwards, identity_mapping_del() needs to be
called; for some reason that function is currently #ifdef CONFIG_SMP ...
* it needs to "catch" sleep_save_sp after cpu_suspend() so that resume
can be provided with the proper starting point.
This requires an ENTRY(sleep_save_sp) in arch/arm/kernel/sleep.S so
that the symbol becomes public.
* it assumes cpu_reset will disable the MMU. cpu_v6_reset/cpu_v7_reset
currently do not do so (nor do a few other, less common CPU types);
see the sketch below the list for what I have in mind.
* there's something of a circular dependency between CONFIG_HIBERNATION
and CONFIG_PM_SLEEP on ARM. The latter is needed so that cpu_suspend
and cpu_resume are compiled in, but it cannot be selected via
ARCH_HIBERNATION_POSSIBLE because CONFIG_PM_SLEEP depends on
CONFIG_HIBERNATION_INTERFACE, which is itself selected by
CONFIG_HIBERNATION. The consequence is that, for now, both
CONFIG_PM_SLEEP and CONFIG_HIBERNATION must be set in the defconfig
for this to compile (see the config lines below).
(my head swirls from writing this ...)
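For the sleep_save_sp point above, the sleep.S change is tiny. Roughly
(a sketch only -- the exact surrounding code in arch/arm/kernel/sleep.S
differs between kernel versions), the local label simply becomes a
global symbol:

	.data
	.align
ENTRY(sleep_save_sp)
	.long	0			@ stash for the suspend stack pointer
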
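To illustrate the cpu_reset point: the resume path here relies on the
per-CPU reset function turning the MMU off before it jumps to the
physical address passed in r0. For v7 I'd expect something along these
lines (an untested illustration of the idea only, not the actual
proc-v7.S code; barriers and Thumb-2 details are left out):

ENTRY(cpu_v7_reset)
	mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
	bic	r1, r1, #0x1		@ clear the M bit (MMU enable)
	mcr	p15, 0, r1, c1, c0, 0	@ write back, MMU is now off
	mov	pc, r0			@ branch to the physical entry point in r0
ENDPROC(cpu_v7_reset)
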
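And concretely, until that Kconfig knot is untangled, the workaround is
to have both of these in the defconfig:

CONFIG_PM_SLEEP=y
CONFIG_HIBERNATION=y
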
Otherwise, this is by far the cleanest version in this series yet.
I've tested this on ARM1176; I still need to test on OMAP3 (Cortex-A8)
and will report on that.
Please let me know what you think,
FrankH.
arch/arm/include/asm/memory.h | 1 +
arch/arm/kernel/Makefile | 1 +
arch/arm/mm/Kconfig | 5 ++
arch/arm/kernel/cpu.c | 94 +++++++++++++++++++++++++++++++++++++++++
arch/arm/kernel/swsusp.S | 84 ++++++++++++++++++++++++++++++++++++
5 files changed, 185 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 431077c..c7ef454 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -250,6 +250,7 @@ static inline void *phys_to_virt(phys_addr_t x)
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8d95446..b76a403 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM_SLEEP) += sleep.o
+obj-$(CONFIG_HIBERNATION) += cpu.o swsusp.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 0074b8d..c668f8f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -627,6 +627,11 @@ config CPU_USE_DOMAINS
config IO_36
bool
+config ARCH_HIBERNATION_POSSIBLE
+ bool
+ depends on MMU
+ default y if CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || CPU_XSCALE || CPU_XSC3 || CPU_V6 || CPU_V6K || CPU_V7
+
comment "Processor Features"
config ARM_THUMB
diff --git a/arch/arm/kernel/cpu.c b/arch/arm/kernel/cpu.c
new file mode 100644
index 0000000..2cdfa85
--- /dev/null
+++ b/arch/arm/kernel/cpu.c
@@ -0,0 +1,94 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+
+extern const void __nosave_begin, __nosave_end;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+	flush_thread();
+	local_fiq_disable();
+}
+
+void notrace restore_processor_state(void)
+{
+	flush_tlb_all();
+	flush_cache_all();
+	local_fiq_enable();
+}
+
+u8 __swsusp_resume_stk[PAGE_SIZE/2] __nosavedata;
+u32 __swsusp_save_sp;
+
+int __swsusp_arch_resume_finish(void)
+{
+	identity_mapping_del(swapper_pg_dir, __pa(_stext), __pa(_etext));
+	return 0;
+}
+
+/*
+ * The framework loads the hibernation image into a linked list anchored
+ * at restore_pblist, for swsusp_arch_resume() to copy back to the proper
+ * destinations.
+ *
+ * To make this work if resume is triggered from initramfs, the
+ * pagetables need to be switched to allow writes to kernel mem.
+ */
+void notrace __swsusp_arch_restore_image(void)
+{
+	extern struct pbe *restore_pblist;
+	extern void cpu_resume(void);
+	extern unsigned long sleep_save_sp;
+	struct pbe *pbe;
+	typeof(cpu_reset) *phys_reset = (typeof(cpu_reset) *)virt_to_phys(cpu_reset);
+
+	cpu_switch_mm(swapper_pg_dir, &init_mm);
+
+	for (pbe = restore_pblist; pbe; pbe = pbe->next)
+		copy_page(pbe->orig_address, pbe->address);
+
+	sleep_save_sp = __swsusp_save_sp;
+	flush_tlb_all();
+	flush_cache_all();
+
+	identity_mapping_add(swapper_pg_dir, __pa(_stext), __pa(_etext));
+
+	flush_tlb_all();
+	flush_cache_all();
+	cpu_proc_fin();
+
+	flush_tlb_all();
+	flush_cache_all();
+
+	phys_reset(virt_to_phys(cpu_resume));
+}
+
diff --git a/arch/arm/kernel/swsusp.S b/arch/arm/kernel/swsusp.S
new file mode 100644
index 0000000..c3a4b83
--- /dev/null
+++ b/arch/arm/kernel/swsusp.S
@@ -0,0 +1,84 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ * https://lkml.org/lkml/2010/6/18/4
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@xxxxxxx>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/assembler.h>
+
+/*
+ * Save the current CPU state before suspend / poweroff.
+ * cpu_suspend() allocates space on the stack to save all necessary
+ * information. This has two consequences:
+ * - swsusp_save() has to be called without changing anything on
+ * the stack. One cannot just return into it.
+ * - should swsusp_save() fail for some reason, the previous value
+ * of sp has to be restored from a safe place.
+ */
+ENTRY(swsusp_arch_suspend)
+	mrs	r1, cpsr
+	mrs	r2, spsr
+	stmfd	sp!, {r1-r11,lr}	@ save registers
+	ldr	r0, =.Ltemp_sp
+	str	sp, [r0]		@ temp
+	ldr	r1, =(PHYS_OFFSET - PAGE_OFFSET)
+	adr	r3, .Lresume_post_mmu	@ resume here
+	bl	cpu_suspend		@ snapshot state (to stack)
+	ldr	r1, =sleep_save_sp
+	ldr	r0, =__swsusp_save_sp
+	ldr	r2, [r1]
+	str	r2, [r0]
+	bl	swsusp_save		@ write snapshot
+	ldr	r1, =.Ltemp_sp
+	ldr	sp, [r1]		@ restore stack
+	ldmfd	sp!, {r1-r11, pc}	@ return
+ENDPROC(swsusp_arch_suspend)
+
+/*
+ * Restore the memory image from the pagelists, and load the CPU registers
+ * from saved state.
+ */
+ENTRY(swsusp_arch_resume)
+	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r2
+	/*
+	 * Switch stack to a nosavedata region to make sure image restore
+	 * doesn't clobber it underneath itself.
+	 * Note that this effectively nukes "current"; from here on, the
+	 * executing code runs context-less and no functions can be called
+	 * that have side effects beyond accessing global variables.
+	 */
+	ldr	sp, =(__swsusp_resume_stk + PAGE_SIZE / 2)
+	b	__swsusp_arch_restore_image
+.ltorg
+.align 5
+	/*
+	 * Execution returns here via resuming the saved context.
+	 * MMU is active again and CPU core state has been restored, all
+	 * that remains to be done now is to restore the CPU registers.
+	 */
+.Lresume_post_mmu:
+	ldmfd	sp!, {r1-r11}
+	msr	cpsr, r1
+	msr	spsr, r2
+	bl	cpu_init		@ reinitialize other modes
+	ldmfd	sp!, {lr}
+	b	__swsusp_arch_resume_finish	@ cleanup
+ENDPROC(swsusp_arch_resume)
+
+.data
+.Ltemp_sp:
+	.long	0