[folded-merged] mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3.patch removed from -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The patch titled
     Subject: mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3
has been removed from the -mm tree.  Its filename was
     mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3.patch

This patch was dropped because it was folded into mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch

------------------------------------------------------
From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3

Qian Cai suggested that the extra boot option and page_ext ops are
unnecessary for a debugging option, unless somebody really complains about
the overhead, with numbers.  So patch 2 is greatly simplified.

Link: http://lkml.kernel.org/r/20191007091808.7096-3-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Suggested-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Suggested-by: Walter Wu <walter-zh.wu@xxxxxxxxxxxx>
Suggested-by: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Suggested-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Suggested-by: Qian Cai <cai@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 Documentation/admin-guide/kernel-parameters.txt |    8 -
 include/linux/page_owner.h                      |    1 
 mm/page_ext.c                                   |    1 
 mm/page_owner.c                                 |   76 ++------------
 4 files changed, 11 insertions(+), 75 deletions(-)

--- a/Documentation/admin-guide/kernel-parameters.txt~mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3
+++ a/Documentation/admin-guide/kernel-parameters.txt
@@ -3246,14 +3246,6 @@
 			we can turn it on.
 			on: enable the feature
 
-	page_owner_free=
-			[KNL] When enabled together with page_owner, store also
-			the stack of who frees a page, for error page dump
-			purposes. This is also implicitly enabled by
-			debug_pagealloc=on or KASAN, so only page_owner=on is
-			sufficient in those cases.
-			on: enable the feature
-
 	page_poison=	[KNL] Boot-time parameter changing the state of
 			poisoning on the buddy allocator, available with
 			CONFIG_PAGE_POISONING=y.
--- a/include/linux/page_owner.h~mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3
+++ a/include/linux/page_owner.h
@@ -7,7 +7,6 @@
 #ifdef CONFIG_PAGE_OWNER
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
-extern struct page_ext_operations page_owner_free_ops;
 
 extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
--- a/mm/page_ext.c~mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3
+++ a/mm/page_ext.c
@@ -61,7 +61,6 @@
 static struct page_ext_operations *page_ext_ops[] = {
 #ifdef CONFIG_PAGE_OWNER
 	&page_owner_ops,
-	&page_owner_free_ops,
 #endif
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	&page_idle_ops,
--- a/mm/page_owner.c~mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc-v3
+++ a/mm/page_owner.c
@@ -24,16 +24,11 @@ struct page_owner {
 	short last_migrate_reason;
 	gfp_t gfp_mask;
 	depot_stack_handle_t handle;
-};
-
-struct page_owner_free {
 	depot_stack_handle_t free_handle;
 };
 
 static bool page_owner_enabled = false;
-static bool page_owner_free_enabled = false;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
-static DEFINE_STATIC_KEY_FALSE(page_owner_free_stack);
 
 static depot_stack_handle_t dummy_handle;
 static depot_stack_handle_t failure_handle;
@@ -58,29 +53,6 @@ static bool need_page_owner(void)
 	return page_owner_enabled;
 }
 
-static int __init early_page_owner_free_param(char *buf)
-{
-	if (!buf)
-		return -EINVAL;
-
-	if (strcmp(buf, "on") == 0)
-		page_owner_free_enabled = true;
-
-	return 0;
-}
-early_param("page_owner_free", early_page_owner_free_param);
-
-static bool need_page_owner_free(void) {
-
-	if (!page_owner_enabled)
-		return false;
-
-	if (IS_ENABLED(CONFIG_KASAN) || debug_pagealloc_enabled())
-		page_owner_free_enabled = true;
-
-	return page_owner_free_enabled;
-}
-
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
 	unsigned long entries[4];
@@ -117,36 +89,17 @@ static void init_page_owner(void)
 	init_early_allocated_pages();
 }
 
-static void init_page_owner_free(void)
-{
-	if (!page_owner_enabled || !page_owner_free_enabled)
-		return;
-
-	static_branch_enable(&page_owner_free_stack);
-}
-
 struct page_ext_operations page_owner_ops = {
 	.size = sizeof(struct page_owner),
 	.need = need_page_owner,
 	.init = init_page_owner,
 };
 
-struct page_ext_operations page_owner_free_ops = {
-	.size = sizeof(struct page_owner_free),
-	.need = need_page_owner_free,
-	.init = init_page_owner_free,
-};
-
 static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
 {
 	return (void *)page_ext + page_owner_ops.offset;
 }
 
-static inline struct page_owner_free *get_page_owner_free(struct page_ext *page_ext)
-{
-	return (void *)page_ext + page_owner_free_ops.offset;
-}
-
 static inline bool check_recursive_alloc(unsigned long *entries,
 					 unsigned int nr_entries,
 					 unsigned long ip)
@@ -191,20 +144,17 @@ void __reset_page_owner(struct page *pag
 	int i;
 	struct page_ext *page_ext;
 	depot_stack_handle_t handle = 0;
-	struct page_owner_free *page_owner_free;
+	struct page_owner *page_owner;
 
-	if (static_branch_unlikely(&page_owner_free_stack))
-		handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 
 	page_ext = lookup_page_ext(page);
 	if (unlikely(!page_ext))
 		return;
 	for (i = 0; i < (1 << order); i++) {
 		__clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-		if (static_branch_unlikely(&page_owner_free_stack)) {
-			page_owner_free = get_page_owner_free(page_ext);
-			page_owner_free->free_handle = handle;
-		}
+		page_owner = get_page_owner(page_ext);
+		page_owner->free_handle = handle;
 		page_ext = page_ext_next(page_ext);
 	}
 }
@@ -452,7 +402,6 @@ void __dump_page_owner(struct page *page
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	struct page_owner *page_owner;
-	struct page_owner_free *page_owner_free;
 	depot_stack_handle_t handle;
 	unsigned long *entries;
 	unsigned int nr_entries;
@@ -489,16 +438,13 @@ void __dump_page_owner(struct page *page
 		stack_trace_print(entries, nr_entries, 0);
 	}
 
-	if (static_branch_unlikely(&page_owner_free_stack)) {
-		page_owner_free = get_page_owner_free(page_ext);
-		handle = READ_ONCE(page_owner_free->free_handle);
-		if (!handle) {
-			pr_alert("page_owner free stack trace missing\n");
-		} else {
-			nr_entries = stack_depot_fetch(handle, &entries);
-			pr_alert("page last free stack trace:\n");
-			stack_trace_print(entries, nr_entries, 0);
-		}
+	handle = READ_ONCE(page_owner->free_handle);
+	if (!handle) {
+		pr_alert("page_owner free stack trace missing\n");
+	} else {
+		nr_entries = stack_depot_fetch(handle, &entries);
+		pr_alert("page last free stack trace:\n");
+		stack_trace_print(entries, nr_entries, 0);
 	}
 
 	if (page_owner->last_migrate_reason != -1)
_

Patches currently in -mm which might be from vbabka@xxxxxxx are

mm-page_owner-fix-off-by-one-error-in-__set_page_owner_handle.patch
mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch
mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch
mm-compaction-fix-wrong-pfn-handling-in-__reset_isolation_pfn.patch




[Index of Archives]     [Kernel Archive]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux