display slabs in cpu partial list for slub

Hello Dave,

Please review this patch. It makes the slub code display the slabs
on the kmem_cache_cpu.partial list.
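
With the patch applied, the per-CPU portion of the "kmem -S" output
should look roughly like the sketch below. The address is made up for
illustration, and the indented placeholder lines stand in for the
existing do_slab_slub() output:

  KMEM_CACHE_CPU    CPU
  ffff88007fc0e7a0    0
  CPU 0 SLAB:
    (details of the per-cpu active slab, or "(empty)")
  CPU 0 PARTIAL:
    (details for each page on the per-cpu partial list, or "(empty)")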

So far I have only been able to test it on RHEL7.0 GA. Could you
please help test it on some other kernels? Thanks a lot.
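
One way to exercise the new code (the cache name below is only an
example; any slub cache should do) is:

  $ crash vmlinux vmcore
  crash> kmem -S kmalloc-256

and then check that every CPU shows a "CPU N PARTIAL:" section in
addition to the existing "CPU N SLAB:" section.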

--
Regards
Qiao Nuohan
From f47f195e4c5e9d3b0e013f6f68b9f1e9d0cf58fa Mon Sep 17 00:00:00 2001
From: qiaonuohan <qiaonuohan@xxxxxxxxxxxxxx>
Date: Tue, 22 Jul 2014 05:18:53 -0400
Subject: [PATCH] display slabs in cpu partial list for slub

The following kernel commit added a per-cpu partial list to slub:

commit 49e2258586b423684f03c278149ab46d8f8b6700
Author: Christoph Lameter <cl@xxxxxxxxx>
Date:   Tue Aug 9 16:12:27 2011 -0500

    slub: per cpu cache for partial pages

This patch teaches the slub code to also display the slabs on each
CPU's partial list.
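
For context, this is roughly the shape of the per-cpu structure after
that commit (an abbreviated sketch, not the exact definition; the
partial pages are singly linked through page->next):

struct kmem_cache_cpu {
	void **freelist;	/* pointer to next available object */
	unsigned long tid;	/* globally unique transaction id */
	struct page *page;	/* the slab we are allocating from */
	struct page *partial;	/* partially allocated frozen slabs */
	/* ... */
};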
---
 defs.h   |  1 +
 memory.c | 51 +++++++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/defs.h b/defs.h
index 44df6ae..1798d7b 100755
--- a/defs.h
+++ b/defs.h
@@ -1661,6 +1661,7 @@ struct offset_table {                    /* stash of commonly-used offsets */
         long kmem_cache_cpu_freelist;
         long kmem_cache_cpu_page;
         long kmem_cache_cpu_node;
+	long kmem_cache_cpu_partial;
 	long kmem_cache_flags;
 	long zone_nr_active;
 	long zone_nr_inactive;
diff --git a/memory.c b/memory.c
index c97dd39..8a16426 100755
--- a/memory.c
+++ b/memory.c
@@ -271,6 +271,7 @@ static long count_partial(ulong, struct meminfo *);
 static ulong get_freepointer(struct meminfo *, void *);
 static int count_free_objects(struct meminfo *, ulong);
 char *is_slab_page(struct meminfo *, char *);
+static void do_cpu_partial_slub(struct meminfo *, int);
 static void do_node_lists_slub(struct meminfo *, ulong, int);
 static int devmem_is_restricted(void);
 static int switch_to_proc_kcore(void);
@@ -392,6 +393,8 @@ vm_init(void)
 	MEMBER_OFFSET_INIT(page_next, "page", "next");
 	if (VALID_MEMBER(page_next)) 
 		MEMBER_OFFSET_INIT(page_prev, "page", "prev");
+	if (INVALID_MEMBER(page_next))
+		ANON_MEMBER_OFFSET_INIT(page_next, "page", "next");
 
 	MEMBER_OFFSET_INIT(page_list, "page", "list");
 	if (VALID_MEMBER(page_list)) {
@@ -675,6 +678,7 @@ vm_init(void)
 		MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist");
 		MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page");
 		MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node");
+		MEMBER_OFFSET_INIT(kmem_cache_cpu_partial, "kmem_cache_cpu", "partial");
 		ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse");
 		ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset");
 		ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab");
@@ -17324,6 +17328,34 @@ bailout:
 	return FALSE;
 }
 
+static void
+do_cpu_partial_slub(struct meminfo *si, int cpu)
+{
+	ulong cpu_slab_ptr;
+	void *partial;
+	void *partial = NULL;	/* stays NULL if the readmem below fails */
+	cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) +
+				kt->__per_cpu_offset[cpu];
+	readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_partial), KVADDR,
+		&partial, sizeof(void *), "kmem_cache_cpu.partial",
+		RETURN_ON_ERROR);
+
+	fprintf(fp, "CPU %d PARTIAL:\n%s", cpu,
+		partial ? "" : "  (empty)\n");
+
+	/*
+	 * kmem_cache_cpu.partial points to the first page of the per-cpu
+	 * partial list; the remaining pages are chained through page.next.
+	 */
+	while (partial) {
+		si->slab = (ulong)partial;
+		do_slab_slub(si, VERBOSE);
+
+		if (!readmem((ulong)partial + OFFSET(page_next), KVADDR,
+			&partial, sizeof(void *), "page.next", RETURN_ON_ERROR))
+			break;	/* avoid looping forever on a read error */
+	}
+}
 
 static void
 do_kmem_cache_slub(struct meminfo *si)  
@@ -17337,19 +17369,26 @@ do_kmem_cache_slub(struct meminfo *si)
 	per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes);
 
         for (i = 0; i < kt->cpus; i++) {
+		cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) +
+				kt->__per_cpu_offset[i];
+		fprintf(fp, "KMEM_CACHE_CPU    CPU\n%lx%s%3d\n", cpu_slab_ptr,
+			space(VADDR_PRLEN > 8 ? 2 : 10), i);
+
 		cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL);
 
 		fprintf(fp, "CPU %d SLAB:\n%s", i, 
 			cpu_slab_ptr ? "" : "  (empty)\n");
 
-                if (!cpu_slab_ptr)
-                        continue;
+		if (cpu_slab_ptr) {
+			if ((n = page_to_nid(cpu_slab_ptr)) >= 0)
+				per_cpu[n]++;
 
-                if ((n = page_to_nid(cpu_slab_ptr)) >= 0)
-			per_cpu[n]++;
+			si->slab = cpu_slab_ptr;
+			do_slab_slub(si, VERBOSE);
+		}
 
-		si->slab = cpu_slab_ptr;
-		do_slab_slub(si, VERBOSE);
+		if (VALID_MEMBER(kmem_cache_cpu_partial))
+			do_cpu_partial_slub(si, i);
 
 		if (received_SIGINT())
 			restart(0);
-- 
1.8.3.1
