Re: [Crash-utility] Test patch to fix kmem -s bug

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This is the patch after making the following changes:
o Created a new function gather_cpudata_list_v2_nodes().
o gather_cpudata_list_v2_nodes() will be called for each node, and it will update avail with the corresponding value of shared memory.
o gather_cpudata_list_v2_nodes() is called inside do_slab_chain_percpu_v2_nodes() during SLAB_WALKTHROUGH, instead of outside as earlier.
o Removed the commented-out section of SLAB_WALKTHROUGH (specified slab).
o Updated with FREEBUF at possible exit points.
o Updated dump_vm_table() to dump vt->kmem_cache_len_nodes.

Opens
o The si->found field was not getting set for the dump I analysed, so the if (si->found) part of the code was not getting executed.
This needs to be checked for that case.


Please go through the patch and let me know of your opinions.
Thanks 
Sharyathi Nagesh


--- crash-4.0-2.22.old/memory.c	2006-03-24 02:21:06.000000000 +0530
+++ crash-4.0-2.22.new/memory.c	2006-04-06 04:22:01.662779728 +0530
@@ -90,6 +90,7 @@ static ulong vaddr_to_slab(ulong);
 static void do_slab_chain(int, struct meminfo *);
 static void do_slab_chain_percpu_v1(long, struct meminfo *);
 static void do_slab_chain_percpu_v2(long, struct meminfo *);
+static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *);
 static void save_slab_data(struct meminfo *);
 static int slab_data_saved(struct meminfo *);
 static void dump_saved_slab_data(void);
@@ -102,6 +103,7 @@ static void gather_slab_free_list(struct
 static void gather_slab_free_list_percpu(struct meminfo *);
 static void gather_cpudata_list_v1(struct meminfo *);
 static void gather_cpudata_list_v2(struct meminfo *);
+static void gather_cpudata_list_v2_nodes(struct meminfo *,int );
 static int check_cpudata_list(struct meminfo *, ulong);
 static int check_shared_list(struct meminfo *, ulong);
 static void gather_slab_cached_count(struct meminfo *);
@@ -355,13 +357,24 @@ vm_init(void)
 			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name");
 			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", 
 				"colour_off");
-			MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+			if(MEMBER_EXISTS("kmem_cache","objsize"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
 				"objsize");
+			else if(MEMBER_EXISTS("kmem_cache","buffer_size"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+				"buffer_size");
 			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags");
 			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
 				"kmem_cache", "gfporder");
 
-			MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			if(MEMBER_EXISTS("kmem_cache","lists"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			else if(MEMBER_EXISTS("kmem_cache","nodelists"))
+			{
+                		vt->flags |= PERCPU_KMALLOC_V2_NODES;
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists");
+				ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, "kmem_cache.nodelists", NULL, 0);
+			}
 			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array");
 			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0);
 		}
@@ -7129,7 +7142,10 @@ dump_kmem_cache_percpu_v2(struct meminfo
                 	"kmem_cache_s num", FAULT_ON_ERROR);
 		si->c_num = (ulong)tmp_val;
 
-		do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
+		if( vt->flags &  PERCPU_KMALLOC_V2_NODES )
+			do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si);
+		else
+			do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
 
 		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
 			DUMP_KMEM_CACHE_INFO_V2();
@@ -7143,11 +7159,15 @@ dump_kmem_cache_percpu_v2(struct meminfo
 
 		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
 
+		if( !(vt->flags &  PERCPU_KMALLOC_V2_NODES))
 			gather_cpudata_list_v2(si);
 
                         si->slab = (si->flags & ADDRESS_SPECIFIED) ?
                         	vaddr_to_slab(si->spec_addr) : 0;
 
+		if( vt->flags &  PERCPU_KMALLOC_V2_NODES )
+			do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si);
+		else 
 			do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);
 
 			if (si->found) {
@@ -7836,6 +7856,234 @@ do_slab_chain_percpu_v2(long cmd, struct
 	}
 }
 
+
+/* 
+* Added To  Traverse the Nodelists 
+*/
+
+static void
+do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si)
+{
+	int i, tmp, s;
+	int list_borked;
+	char *slab_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
+	ulong *start_address;
+	int index;
+
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
+	start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), 
+                      KVADDR, &start_address[0], 
+                     sizeof(ulong) * vt->kmem_cache_len_nodes , 
+                      "array nodelist array", RETURN_ON_ERROR)) 
+                          error(INFO, "Error encountered with reading nodelists"); 
+
+
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+        	si->num_slabs = si->inuse = 0;
+		slab_buf = GETBUF(SIZE(slab));
+		for( index=0 ; index < vt->kmem_cache_len_nodes && start_address[index] ; index++)
+		{ 
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+		        slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+			
+			gather_cpudata_list_v2_nodes(si,index); 
+	
+		        if (CRASHDEBUG(1)) {
+                	fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+	                fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+                        slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+	
+		                if (!readmem(slab_chains[s],
+	        	            KVADDR, &si->slab, sizeof(ulong),
+	                	    "first slab", QUIET|RETURN_ON_ERROR)) {
+	                                error(INFO, 
+					    "%s: %s list: bad slab pointer: %lx\n",
+                	                        si->curname,
+						slab_chain_name_v2[s],
+                                	        slab_chains[s]);
+					list_borked = 1;
+					continue;
+				}
+	
+				if (slab_data_saved(si)) {
+					FREEBUF(slab_buf);
+					FREEBUF(start_address);
+					return;
+				}
+			
+				if (si->slab == slab_chains[s]) 
+					continue;
+	
+				last = slab_chains[s];
+
+				do {
+	        	                if (received_SIGINT()) {
+						FREEBUF(slab_buf);
+						FREEBUF(start_address);
+	                        	        restart(0);
+					}
+
+					if (!verify_slab_v2(si, last, s)) {
+						list_borked = 1;
+						continue;
+						}
+					last = si->slab - OFFSET(slab_list);
+		
+		        	        readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+		
+					tmp = INT(slab_buf + OFFSET(slab_inuse));
+					si->inuse += tmp;
+	
+					if (ACTIVE())
+						gather_cpudata_list_v2_nodes(si,index); 
+
+					si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
+					gather_slab_cached_count(si);
+	
+					si->num_slabs++;
+		
+					si->slab = ULONG(slab_buf + 
+						OFFSET(slab_list));
+					si->slab -= OFFSET(slab_list);
+
+				/*
+				 *  Check for slab transition. (Tony Dziedzic)
+				 */
+					for (i = 0; i < SLAB_CHAINS; i++) {
+     						if ((i != s) && 
+						    (si->slab == slab_chains[i])) {
+       							error(NOTE, 
+		  	                      "%s: slab chain inconsistency: %s list\n",
+								si->curname,
+								slab_chain_name_v2[s]);
+       							list_borked = 1;
+     						}
+					}
+			
+				} while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		if (!list_borked)
+			save_slab_data(si);
+		break;
+
+	case SLAB_WALKTHROUGH:
+		specified_slab = si->slab;     
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
+		slab_buf = GETBUF(SIZE(slab));
+		for( index=0 ; index < vt->kmem_cache_len_nodes && start_address[index] ; index++)
+		{ 
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+		        slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+	
+			gather_cpudata_list_v2_nodes(si,index);
+ 
+		        if (CRASHDEBUG(1)) {
+                	fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+	                fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+                        slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+
+	        	if (!specified_slab) {
+	                	if (!readmem(slab_chains[s],
+	       	                    KVADDR, &si->slab, sizeof(ulong),
+	               	            "slabs", QUIET|RETURN_ON_ERROR)) {
+                               	        error(INFO,
+	                                        "%s: %s list: bad slab pointer: %lx\n",
+                                                si->curname,
+						slab_chain_name_v2[s],
+                       	                        slab_chains[s]);
+						list_borked = 1;
+						continue;
+					}
+					last = slab_chains[s];
+				} else
+					last = 0;
+			
+				if (si->slab == slab_chains[s])
+					continue;
+				
+				readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+		
+				si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
+
+	
+				if (CRASHDEBUG(1)) {
+					fprintf(fp, "search cache: [%s] ", si->curname);
+					if (si->flags & ADDRESS_SPECIFIED) 
+						fprintf(fp, "for %llx", si->spec_addr);
+					fprintf(fp, "\n");
+					}
+	
+			        do {
+		                        if (received_SIGINT())
+					{
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+	        	                        restart(0);
+					}
+	
+                        	        if (!verify_slab_v2(si, last, s)) {
+                                	        list_borked = 1;
+                                        	continue;
+	                                }
+        	                        last = si->slab - OFFSET(slab_list);
+	
+			                dump_slab_percpu_v2(si);
+					
+					if (si->found) {
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+						return;
+					}
+		
+			                readmem(si->slab+OFFSET(slab_list),
+			                        KVADDR, &si->slab, sizeof(ulong),
+			                        "slab list", FAULT_ON_ERROR);
+			
+					si->slab -= OFFSET(slab_list);
+	
+			        } while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		break;
+	}
+	FREEBUF(slab_buf);
+	FREEBUF(start_address);
+}
+
 /*
  *  Try to preclude any attempt to translate a bogus slab structure.
  */
@@ -8757,6 +9005,99 @@ gather_cpudata_list_v2(struct meminfo *s
                 	fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
 }
 
+
+
+/*
+ *  Updated gather_cpudata_list_v2 to take care of recent changes in kmem_cache 
+ */
+
+static void
+gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
+{
+        int i, j;
+	int avail;
+        ulong cpudata[NR_CPUS];
+	ulong shared;
+	ulong *start_address;
+	
+	start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+        readmem(si->cache+OFFSET(kmem_cache_s_array),
+                KVADDR, &cpudata[0], 
+		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+                "array_cache array", FAULT_ON_ERROR);
+
+        for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i]; i++) {
+		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
+
+                readmem(cpudata[i]+OFFSET(array_cache_avail),
+                        KVADDR, &avail, sizeof(int),
+                        "array cache avail", FAULT_ON_ERROR);
+
+		if (!avail) 
+			continue;
+
+		if (avail > vt->kmem_max_limit) {
+			error(INFO, 
+	  	  "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
+				si->curname, avail, vt->kmem_max_limit);
+			si->errors++;
+		}
+
+		if (CRASHDEBUG(2))
+			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
+				si->curname, i, avail);
+		
+                readmem(cpudata[i]+SIZE(array_cache),
+                        KVADDR, si->cpudata[i],
+			sizeof(void *) * avail,
+                        "array_cache avail", FAULT_ON_ERROR);
+
+		if (CRASHDEBUG(2))
+			for (j = 0; j < avail; j++)
+				fprintf(fp, "  %lx (cpu %d)\n", si->cpudata[i][j], i);
+        }
+
+        /*
+         *  If the shared list contains anything, gather them as well.
+         */
+	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);
+
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], 
+	                     sizeof(ulong) * vt->kmem_cache_len_nodes , 
+        	              "array nodelist array", RETURN_ON_ERROR) ||  
+	!readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
+	"kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
+	!readmem(shared + OFFSET(array_cache_avail),
+       	KVADDR, &avail, sizeof(int), "shared array_cache avail",
+        RETURN_ON_ERROR|QUIET) || !avail){
+	FREEBUF(start_address);
+	return;
+	}
+
+	if (avail > vt->kmem_max_limit) {
+		error(INFO, 
+  	  "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
+			si->curname, avail, vt->kmem_max_limit);
+		si->errors++;
+		FREEBUF(start_address);
+		return;
+	}
+
+	if (CRASHDEBUG(2))
+		fprintf(fp, "%s: shared avail: %d\n", 
+			si->curname, avail);
+
+        readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache,
+        	sizeof(void *) * avail, "shared array_cache avail", 
+		FAULT_ON_ERROR);
+
+        if (CRASHDEBUG(2))
+        	for (j = 0; j < avail; j++)
+                	fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
+	FREEBUF(start_address);
+}
+
 /*
  *  Check whether a given address is contained in the previously-gathered
  *  percpu object cache.
@@ -9149,6 +9490,7 @@ dump_vm_table(int verbose)
 	fprintf(fp, "      kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
 	fprintf(fp, "   kmem_cache_count: %ld\n", vt->kmem_cache_count);
 	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
+	fprintf(fp, " kmem_cache_nodelist_len: %ld\n", vt->kmem_cache_len_nodes);
 	fprintf(fp, "        PG_reserved: %lx\n", vt->PG_reserved);
 	fprintf(fp, "            PG_slab: %ld\n", vt->PG_slab);
 	fprintf(fp, "        paddr_prlen: %d\n", vt->paddr_prlen);


--- crash-4.0-2.22.old/defs.h	2006-03-24 02:21:06.000000000 +0530
+++ crash-4.0-2.22.new/defs.h	2006-04-06 04:15:46.901752064 +0530
@@ -1485,6 +1485,7 @@ struct vm_table {                /* kern
 	ulong kmem_max_limit;
 	ulong kmem_max_cpus;
 	ulong kmem_cache_count;
+	ulong kmem_cache_len_nodes;
 	ulong PG_reserved;
 	ulong PG_slab;
 	int kmem_cache_namelen;
@@ -1521,6 +1522,7 @@ struct vm_table {                /* kern
 #define DISCONTIGMEM		(0x200)
 #define SPARSEMEM		(0x400)
 #define SPARSEMEM_EX		(0x800)
+#define PERCPU_KMALLOC_V2_NODES   (0x1000)
 
 #define IS_FLATMEM()		(vt->flags & FLATMEM)
 #define IS_DISCONTIGMEM()	(vt->flags & DISCONTIGMEM)

[Index of Archives]     [Fedora Development]     [Fedora Desktop]     [Fedora SELinux]     [Yosemite News]     [KDE Users]     [Fedora Tools]

 

Powered by Linux