[RFC PATCH 1/4] mm/memory_hotplug: Add MHP_ALLOCATE flag which treats hotplugged memory as allocated

When memory is added by hotplug, it is treated as free by default.
Add an MHP_ALLOCATE flag which can be passed to the add_memory_...()
memory hotplug functions to treat the new memory as allocated instead:
the pages are onlined with a reference count of one and are not
released to the page allocator. This allows the memory contents to be
inspected before the pages are freed for system use. The feature is
useful when the hotplugged memory is persistent, since it can be
scanned for any persistent data which should be retained, rather than
freed up for system use and overwritten.

Signed-off-by: Srinivas Aji <srinivas.aji@xxxxxxxxxxxx>
---
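
(Illustrative note, not part of the commit itself: a minimal sketch of
how a caller might use the new flag. The nid/range variables and the
"System RAM (pmem)" resource name are hypothetical; the call is the
existing add_memory_driver_managed() entry point that a persistent
memory driver might use.)

	/*
	 * Hypothetical caller asking for its hotplugged range to come
	 * up online, with every page left in the allocated state:
	 */
	rc = add_memory_driver_managed(nid, range_start, range_size,
				       "System RAM (pmem)",
				       MHP_MERGE_RESOURCE | MHP_ALLOCATE);
	if (rc)
		return rc;
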
 include/linux/memory_hotplug.h |  4 ++++
 mm/internal.h                  |  3 ++-
 mm/memory_hotplug.c            | 15 +++++++++++++--
 mm/page_alloc.c                | 19 ++++++++++++-------
 4 files changed, 31 insertions(+), 10 deletions(-)
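
(Similarly illustrative: once pages onlined as allocated have been
scanned, their owner can release any page with nothing worth retaining
by dropping the single reference the onlining path left on it;
page_holds_persistent_data() is a made-up placeholder for that scan.)

	/* Hypothetical consumer of pages onlined with MHP_ALLOCATE: */
	if (!page_holds_persistent_data(page))
		__free_page(page);	/* refcount 1 -> 0, back to buddy */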

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 20d7edf62a6a..15c9b1f2a5be 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -113,6 +113,10 @@ typedef int __bitwise mhp_t;
  * implies the node id (nid).
  */
 #define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
+/*
+ * Online this memory when it is added, and treat its pages as allocated.
+ */
+#define MHP_ALLOCATE		((__force mhp_t)BIT(3))
 
 /*
  * Extended parameters for memory hotplug:
diff --git a/mm/internal.h b/mm/internal.h
index c0f8fbe0445b..ee37749e341e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -356,7 +356,8 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
 				    int mt);
 extern void memblock_free_pages(struct page *page, unsigned long pfn,
 					unsigned int order);
-extern void __free_pages_core(struct page *page, unsigned int order);
+extern void __free_pages_core(struct page *page, unsigned int order,
+			      bool allocate);
 extern void prep_compound_page(struct page *page, unsigned int order);
 extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1213d0c67a53..eeef37d37bfa 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -177,6 +177,11 @@ static int __init setup_memhp_default_state(char *str)
 }
 __setup("memhp_default_state=", setup_memhp_default_state);
 
+/*
+ * If set, pages are left allocated instead of freed when memory is onlined.
+ */
+static bool mhp_allocate;
+
 void mem_hotplug_begin(void)
 {
 	cpus_read_lock();
@@ -595,7 +600,7 @@ void generic_online_page(struct page *page, unsigned int order)
 	 * case in page freeing fast path.
 	 */
 	debug_pagealloc_map_pages(page, 1 << order);
-	__free_pages_core(page, order);
+	__free_pages_core(page, order, mhp_allocate);
 	totalram_pages_add(1UL << order);
 }
 EXPORT_SYMBOL_GPL(generic_online_page);
@@ -1417,10 +1422,16 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 	if (mhp_flags & MHP_MERGE_RESOURCE)
 		merge_system_ram_resource(res);
 
+	if (mhp_flags & MHP_ALLOCATE)
+		mhp_allocate = true;
+
 	/* online pages if requested */
-	if (mhp_default_online_type != MMOP_OFFLINE)
+	if (mhp_default_online_type != MMOP_OFFLINE ||
+	    (mhp_flags & MHP_ALLOCATE))
 		walk_memory_blocks(start, size, NULL, online_memory_block);
 
+	mhp_allocate = false;
+
 	return ret;
 error:
 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b0bcab50f0a3..72b3955145ef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1643,12 +1643,12 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	__count_vm_events(PGFREE, 1 << order);
 }
 
-void __free_pages_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order, bool allocate)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
 	unsigned int loop;
-
+	int count = allocate ? 1 : 0;
 	/*
 	 * When initializing the memmap, __init_single_page() sets the refcount
 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
@@ -1658,13 +1658,18 @@ void __free_pages_core(struct page *page, unsigned int order)
 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
 		prefetchw(p + 1);
 		__ClearPageReserved(p);
-		set_page_count(p, 0);
+		set_page_count(p, count);
 	}
 	__ClearPageReserved(p);
-	set_page_count(p, 0);
+	set_page_count(p, count);
 
 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
 
+	/*
+	 * Don't free the pages if we want them to appear allocated.
+	 */
+	if (allocate)
+		return;
 	/*
 	 * Bypass PCP and place fresh pages right to the tail, primarily
 	 * relevant for memory onlining.
@@ -1729,7 +1734,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 {
 	if (early_page_uninitialised(pfn))
 		return;
-	__free_pages_core(page, order);
+	__free_pages_core(page, order, false);
 }
 
 /*
@@ -1818,14 +1823,14 @@ static void __init deferred_free_range(unsigned long pfn,
 	if (nr_pages == pageblock_nr_pages &&
 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, pageblock_order);
+		__free_pages_core(page, pageblock_order, false);
 		return;
 	}
 
 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, 0);
+		__free_pages_core(page, 0, false);
 	}
 }
 
-- 
2.30.2