Re: [PATCH 2/4] ACPI hibernate: Add a mechanism to save/restore ACPI NVS memory

On Thursday, 23 of October 2008, Nigel Cunningham wrote:
> Hi.
Hi,
> On Wed, 2008-10-22 at 22:52 +0200, Rafael J. Wysocki wrote:
> > From: Rafael J. Wysocki <rjw@xxxxxxx>
> > 
> > ACPI hibernate: Add a mechanism to save/restore ACPI NVS memory
> > 
> > According to the ACPI Specification 3.0b, Section 15.3.2,
> > "OSPM will call the _PTS control method some time before entering a
> > sleeping state, to allow the platform's AML code to update this
> > memory image before entering the sleeping state. After the system
> > awakes from an S4 state, OSPM will restore this memory area and call
> > the _WAK control method to enable the BIOS to reclaim its memory
> > image."  For this reason, implement a mechanism allowing us to save
> > the NVS memory during hibernation and to restore it during the
> > subsequent resume.
> > 
> > Based on a patch by Zhang Rui.
> > 
> > Signed-off-by: Rafael J. Wysocki <rjw@xxxxxxx>
> > Cc: Zhang Rui <rui.zhang@xxxxxxxxx>
> > ---
> >  drivers/acpi/sleep/main.c |   50 ++++++++++++++++---
> >  include/linux/suspend.h   |   13 ++++
> >  kernel/power/swsusp.c     |  121 ++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 177 insertions(+), 7 deletions(-)
> > 
> > Index: linux-2.6/drivers/acpi/sleep/main.c
> > ===================================================================
> > --- linux-2.6.orig/drivers/acpi/sleep/main.c
> > +++ linux-2.6/drivers/acpi/sleep/main.c
> > @@ -321,8 +321,23 @@ void __init acpi_no_s4_hw_signature(void
> >  
> >  static int acpi_hibernation_begin(void)
> >  {
> > -	acpi_target_sleep_state = ACPI_STATE_S4;
> > -	return 0;
> > +	int error;
> > +
> > +	error = hibernate_nvs_alloc();
> > +	if (!error)
> > +		acpi_target_sleep_state = ACPI_STATE_S4;
> > +
> > +	return error;
> > +}
> > +
> > +static int acpi_hibernation_pre_snapshot(void)
> > +{
> > +	int error = acpi_pm_prepare();
> > +
> > +	if (!error)
> > +		hibernate_nvs_save();
> > +
> > +	return error;
> >  }
> >  
> >  static int acpi_hibernation_enter(void)
> > @@ -343,6 +358,12 @@ static int acpi_hibernation_enter(void)
> >  	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
> >  }
> >  
> > +static void acpi_hibernation_finish(void)
> > +{
> > +	hibernate_nvs_free();
> > +	acpi_pm_finish();
> > +}
> > +
> >  static void acpi_hibernation_leave(void)
> >  {
> >  	/*
> > @@ -358,6 +379,8 @@ static void acpi_hibernation_leave(void)
> >  			"cannot resume!\n");
> >  		panic("ACPI S4 hardware signature mismatch");
> >  	}
> > +	/* Restore the NVS memory area */
> > +	hibernate_nvs_restore();
> >  }
> >  
> >  static void acpi_pm_enable_gpes(void)
> > @@ -368,8 +391,8 @@ static void acpi_pm_enable_gpes(void)
> >  static struct platform_hibernation_ops acpi_hibernation_ops = {
> >  	.begin = acpi_hibernation_begin,
> >  	.end = acpi_pm_end,
> > -	.pre_snapshot = acpi_pm_prepare,
> > -	.finish = acpi_pm_finish,
> > +	.pre_snapshot = acpi_hibernation_pre_snapshot,
> > +	.finish = acpi_hibernation_finish,
> >  	.prepare = acpi_pm_prepare,
> >  	.enter = acpi_hibernation_enter,
> >  	.leave = acpi_hibernation_leave,
> > @@ -387,8 +410,21 @@ static int acpi_hibernation_begin_old(vo
> >  {
> >  	int error = acpi_sleep_prepare(ACPI_STATE_S4);
> >  
> > +	if (!error) {
> > +		error = hibernate_nvs_alloc();
> > +		if (!error)
> > +			acpi_target_sleep_state = ACPI_STATE_S4;
> > +	}
> > +	return error;
> > +}
> > +
> > +static int acpi_hibernation_pre_snapshot_old(void)
> > +{
> > +	int error = acpi_pm_disable_gpes();
> > +
> >  	if (!error)
> > -		acpi_target_sleep_state = ACPI_STATE_S4;
> > +		hibernate_nvs_save();
> > +
> >  	return error;
> >  }
> >  
> > @@ -399,8 +435,8 @@ static int acpi_hibernation_begin_old(vo
> >  static struct platform_hibernation_ops acpi_hibernation_ops_old = {
> >  	.begin = acpi_hibernation_begin_old,
> >  	.end = acpi_pm_end,
> > -	.pre_snapshot = acpi_pm_disable_gpes,
> > -	.finish = acpi_pm_finish,
> > +	.pre_snapshot = acpi_hibernation_pre_snapshot_old,
> > +	.finish = acpi_hibernation_finish,
> >  	.prepare = acpi_pm_disable_gpes,
> >  	.enter = acpi_hibernation_enter,
> >  	.leave = acpi_hibernation_leave,
> > Index: linux-2.6/include/linux/suspend.h
> > ===================================================================
> > --- linux-2.6.orig/include/linux/suspend.h
> > +++ linux-2.6/include/linux/suspend.h
> > @@ -233,6 +233,11 @@ extern unsigned long get_safe_page(gfp_t
> >  extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
> >  extern int hibernate(void);
> >  extern bool system_entering_hibernation(void);
> > +extern int hibernate_nvs_register(unsigned long start, unsigned long size);
> > +extern int hibernate_nvs_alloc(void);
> > +extern void hibernate_nvs_free(void);
> > +extern void hibernate_nvs_save(void);
> > +extern void hibernate_nvs_restore(void);
> >  #else /* CONFIG_HIBERNATION */
> >  static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
> >  static inline void swsusp_set_page_free(struct page *p) {}
> > @@ -241,6 +246,14 @@ static inline void swsusp_unset_page_fre
> >  static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
> >  static inline int hibernate(void) { return -ENOSYS; }
> >  static inline bool system_entering_hibernation(void) { return false; }
> > +static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
> > +{
> > +	return 0;
> > +}
> > +static inline int hibernate_nvs_alloc(void) { return 0; }
> > +static inline void hibernate_nvs_free(void) {}
> > +static inline void hibernate_nvs_save(void) {}
> > +static inline void hibernate_nvs_restore(void) {}
> >  #endif /* CONFIG_HIBERNATION */
> >  
> >  #ifdef CONFIG_PM_SLEEP
> > Index: linux-2.6/kernel/power/swsusp.c
> > ===================================================================
> > --- linux-2.6.orig/kernel/power/swsusp.c
> > +++ linux-2.6/kernel/power/swsusp.c
> > @@ -262,3 +262,124 @@ int swsusp_shrink_memory(void)
> >  
> >  	return 0;
> >  }
> > +
> > +/*
> > + * Platforms, like ACPI, may want us to save some memory used by them during
> > + * hibernation and to restore the contents of this memory during the subsequent
> > + * resume.  The code below implements a mechanism allowing us to do that.
> > + */
> > +
> > +struct nvs_page {
> > +	unsigned long phys_start;
> > +	unsigned int size;
> > +	void *kaddr;
> > +	void *data;
> > +	struct list_head node;
> > +};
> > +
> > +static LIST_HEAD(nvs_list);
> > +
> > +/**
> > + *	hibernate_nvs_register - register platform NVS memory region to save
> > + *	@start - physical address of the region
> > + *	@size - size of the region
> > + *
> > + *	The NVS region need not be page-aligned (both ends) and we arrange
> > + *	things so that the data from page-aligned addresses in this region will
> > + *	be copied into separate RAM pages.
> > + */
> > +int hibernate_nvs_register(unsigned long start, unsigned long size)
> > +{
> > +	struct nvs_page *entry, *next;
> > +
> > +	while (size > 0) {
> > +		unsigned int nr_bytes;
> > +
> > +		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
> > +		if (!entry)
> > +			goto Error;
> > +
> > +		list_add_tail(&entry->node, &nvs_list);
> > +		entry->phys_start = start;
> > +		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
> > +		entry->size = (size < nr_bytes) ? size : nr_bytes;
> > +
> > +		start += entry->size;
> > +		size -= entry->size;
> > +	}
> > +	return 0;
> > +
> > + Error:
> > +	list_for_each_entry_safe(entry, next, &nvs_list, node) {
> > +		list_del(&entry->node);
> > +		kfree(entry);
> > +	}
> > +	return -ENOMEM;
> > +}
> > +
> > +/**
> > + *	hibernate_nvs_free - free data pages allocated for saving NVS regions
> > + *
> > + *	It is allowed to call this function many times in a row.
> > + */
> If this one is safe for multiple calls, should nvs_alloc be safe for
> multiple calls too? Or, conversely, is something funny going on such
> that hibernate_nvs_free needs to be safe for multiple calls?
I thought it would be necessary to call that additionally from platform_end()
for the error path in which platform_pre_snapshot() fails, but that doesn't
seem to be the case.  It doesn't hurt to keep it in this form, though, IMO.
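
For illustration, this (purely hypothetical, not part of the patch) is the
kind of .end wrapper I had been considering; since hibernate_nvs_free()
clears entry->data as it frees the pages, calling it again after
acpi_hibernation_finish() would simply be a no-op:

static void acpi_hibernation_end(void)
{
	/*
	 * Free the NVS data pages in case .pre_snapshot failed after
	 * .begin had allocated them.  If acpi_hibernation_finish() has
	 * already run, the pages are gone and this does nothing.
	 */
	hibernate_nvs_free();
	acpi_pm_end();
}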
> > +void hibernate_nvs_free(void)
> > +{
> > +	struct nvs_page *entry;
> > +
> > +	list_for_each_entry(entry, &nvs_list, node)
> > +		if (entry->data) {
> > +			free_page((unsigned long)entry->data);
> > +			entry->data = NULL;
> > +			if (entry->kaddr) {
> > +				iounmap(entry->kaddr);
> > +				entry->kaddr = NULL;
> > +			}
> > +		}
> > +}
> > +
> > +/**
> > + *	hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
> > + */
> > +int hibernate_nvs_alloc(void)
> > +{
> > +	struct nvs_page *entry;
> > +
> > +	list_for_each_entry(entry, &nvs_list, node) {
> > +		entry->data = (void *)__get_free_page(GFP_KERNEL);
> > +		if (!entry->data) {
> > +			hibernate_nvs_free();
> > +			return -ENOMEM;
> > +		}
> > +	}
> > +	return 0;
> > +}
> > +
> > +/**
> > + *	hibernate_nvs_save - save NVS memory regions
> > + */
> > +void hibernate_nvs_save(void)
> > +{
> > +	struct nvs_page *entry;
> > +
> > +	printk(KERN_INFO "PM: Saving platform NVS memory\n");
> > +
> > +	list_for_each_entry(entry, &nvs_list, node)
> > +		if (entry->data) {
> > +			entry->kaddr = ioremap(entry->phys_start, entry->size);
> > +			memcpy(entry->data, entry->kaddr, entry->size);
> Why not unmap here? (I know you're delaying the unmap until the free,
> but it's probably clearer to map and unmap each time the memory is
> accessed (ie in restore too).
Ah, this one is tricky.  We cannot unmap from atomic context. :-)
> > +		}
> > +}
> > +
> > +/**
> > + *	hibernate_nvs_restore - restore NVS memory regions
> > + */
> > +void hibernate_nvs_restore(void)
> > +{
> > +	struct nvs_page *entry;
> > +
> > +	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
> > +
> > +	list_for_each_entry(entry, &nvs_list, node)
> > +		if (entry->data)
> > +			memcpy(entry->kaddr, entry->data, entry->size);
> > +}
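
In case it helps review, below is a rough sketch (the initcall, address and
size are made up, just for illustration) of how I expect the interface to be
used by a platform, along with the order in which the core invokes the
helpers.  It also shows why restore does only the memcpy(): it runs with
interrupts off, as noted above, so the mapping set up in save is kept until
free.

/*
 * Hypothetical example: register one NVS region at boot.  The core then
 * drives the helpers through the hibernation callbacks:
 *
 *   .begin        -> hibernate_nvs_alloc()    (process context, may fail)
 *   .pre_snapshot -> hibernate_nvs_save()     (ioremap() + memcpy())
 *   .leave        -> hibernate_nvs_restore()  (memcpy() only; runs with
 *                     interrupts off, hence no ioremap()/iounmap() here)
 *   .finish       -> hibernate_nvs_free()     (iounmap() + free_page())
 */
static int __init example_nvs_init(void)
{
	/* made-up 64 KB region; real addresses come from the firmware */
	return hibernate_nvs_register(0xdf650000, 0x10000);
}
core_initcall(example_nvs_init);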
Thanks,
Rafael

