[RFC PATCH bpf-next 5/9] mm: Account active vm for page

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Account active vm for page allocation and unaccount it when the page is freed.
We can reuse the slab_data in struct active_vm to store the information
of page allocation.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/page_ext.h |  1 +
 mm/active_vm.c           | 38 +++++++++++++++++++++++++++++++++++++-
 mm/active_vm.h           | 12 ++++++++++++
 mm/page_alloc.c          | 14 ++++++++++++++
 4 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 22be4582faae..5d02f939d5df 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
+#include <linux/active_vm.h>
 
 struct pglist_data;
 struct page_ext_operations {
diff --git a/mm/active_vm.c b/mm/active_vm.c
index ee38047a4adc..a06987639e00 100644
--- a/mm/active_vm.c
+++ b/mm/active_vm.c
@@ -34,7 +34,10 @@ static void __init init_active_vm(void)
 }
 
 struct active_vm {
-	int *slab_data;     /* for slab */
+	union {
+		int *slab_data;     /* for slab */
+		unsigned long page_data;	/* for page */
+	};
 };
 
 struct page_ext_operations active_vm_ops = {
@@ -165,3 +168,36 @@ void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int c
 	}
 	page_ext_put(page_ext);
 }
+
+void page_set_active_vm(struct page *page, unsigned int item, unsigned int order)
+{
+	struct page_ext *page_ext = page_ext_get(page);
+	struct active_vm *av;
+
+	if (unlikely(!page_ext))
+		return;
+
+	av = (void *)(page_ext) + active_vm_ops.offset;
+	WARN_ON_ONCE(av->page_data != 0);
+	av->page_data = item;
+	page_ext_put(page_ext);
+	active_vm_item_add(item, PAGE_SIZE << order);
+}
+
+void page_test_clear_active_vm(struct page *page, unsigned int order)
+{
+	struct page_ext *page_ext = page_ext_get(page);
+	struct active_vm *av;
+
+	if (unlikely(!page_ext))
+		return;
+
+	av = (void *)(page_ext) + active_vm_ops.offset;
+	if (!av->page_data)
+		goto out;
+
+	active_vm_item_sub(av->page_data, PAGE_SIZE << order);
+	av->page_data = 0;
+out:
+	page_ext_put(page_ext);
+}
diff --git a/mm/active_vm.h b/mm/active_vm.h
index cf80b35412c5..1ff27b0b5dbe 100644
--- a/mm/active_vm.h
+++ b/mm/active_vm.h
@@ -10,6 +10,8 @@ extern struct page_ext_operations active_vm_ops;
 void active_vm_slab_add(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
 void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int cnt);
 void active_vm_slab_free(struct slab *slab);
+void page_set_active_vm(struct page *page, unsigned int item, unsigned int order);
+void page_test_clear_active_vm(struct page *page, unsigned int order);
 
 static inline int active_vm_item(void)
 {
@@ -33,6 +35,7 @@ static inline void active_vm_item_sub(int item, long delta)
 	WARN_ON_ONCE(item <= 0);
 	this_cpu_sub(active_vm_stats.stat[item - 1], delta);
 }
+
 #else /* CONFIG_ACTIVE_VM */
 static inline int active_vm_item(void)
 {
@@ -58,5 +61,14 @@ static inline void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, v
 static inline void active_vm_slab_free(struct slab *slab)
 {
 }
+
+static inline void page_set_active_vm(struct page *page, unsigned int item,
+				      unsigned int order)
+{
+}
+
+static inline void page_test_clear_active_vm(struct page *page, unsigned int order)
+{
+}
 #endif /* CONFIG_ACTIVE_VM */
 #endif /* __MM_ACTIVE_VM_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6e60657875d3..deac544e9050 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -76,6 +76,8 @@
 #include <linux/khugepaged.h>
 #include <linux/buffer_head.h>
 #include <linux/delayacct.h>
+#include <linux/page_ext.h>
+#include <linux/active_vm.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -83,6 +85,7 @@
 #include "shuffle.h"
 #include "page_reporting.h"
 #include "swap.h"
+#include "active_vm.h"
 
 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
 typedef int __bitwise fpi_t;
@@ -1449,6 +1452,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		page->mapping = NULL;
 	if (memcg_kmem_enabled() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
+
+	if (active_vm_enabled())
+		page_test_clear_active_vm(page, order);
+
 	if (check_free && free_page_is_bad(page))
 		bad++;
 	if (bad)
@@ -5577,6 +5584,13 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		page = NULL;
 	}
 
+	if (active_vm_enabled() && (gfp & __GFP_ACCOUNT) && page) {
+		int active_vm = active_vm_item();
+
+		if (active_vm > 0)
+			page_set_active_vm(page, active_vm, order);
+	}
+
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 	kmsan_alloc_page(page, order, alloc_gfp);
 
-- 
2.30.1 (Apple Git-130)





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux