Re: [Crash-utility] Test patch to fix kmem -s bug

This is the reworked/consolidated patch.  It addresses:

o Redundant updating of cpudata.
o Extending shared_array_cache to span all nodes.
o A new si->current_cache_index to hold the current index into
  si->shared_array_cache.
o A new routine so that vt->kmem_max_limit is computed correctly on
  multi-node (NUMA) architectures.

Regards
Sharyathi Nagesh
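
Background for reviewers: the 2.6 NUMA slab rework replaced the single
set of slab lists embedded in struct kmem_cache with one kmem_list3
pointer per node, and renamed "objsize" to "buffer_size".  Roughly, as
a simplified sketch of the mainline 2.6.x headers (illustrative only,
not part of the patch):

    /* Older kernels: one embedded list set; crash reads "kmem_cache.lists". */
    struct kmem_cache {
            unsigned int objsize;           /* per-object size */
            struct kmem_list3 lists;        /* partial/full/free slab chains */
            /* ... */
    };

    /* NUMA-aware kernels: one kmem_list3 per node; crash has to walk
     * "kmem_cache.nodelists[]" instead, hence the PERCPU_KMALLOC_V2_NODES
     * code paths added below. */
    struct kmem_cache {
            unsigned int buffer_size;       /* renamed from objsize */
            struct kmem_list3 *nodelists[MAX_NUMNODES];
            /* ... */
    };
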
--- crash-4.0-2.22.old/memory.c	2006-03-24 02:21:06.000000000 +0530
+++ crash-4.0-2.22.new/memory.c	2006-04-10 22:23:34.080886096 +0530
@@ -46,6 +46,7 @@ struct meminfo {           /* general pu
 	int *kmem_bufctl;
 	ulong *cpudata[NR_CPUS];
 	ulong *shared_array_cache;
+	int current_cache_index;
 	ulong found;
 	ulong retval;
 	char *ignore;
@@ -90,6 +91,7 @@ static ulong vaddr_to_slab(ulong);
 static void do_slab_chain(int, struct meminfo *);
 static void do_slab_chain_percpu_v1(long, struct meminfo *);
 static void do_slab_chain_percpu_v2(long, struct meminfo *);
+static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *);
 static void save_slab_data(struct meminfo *);
 static int slab_data_saved(struct meminfo *);
 static void dump_saved_slab_data(void);
@@ -102,6 +104,7 @@ static void gather_slab_free_list(struct
 static void gather_slab_free_list_percpu(struct meminfo *);
 static void gather_cpudata_list_v1(struct meminfo *);
 static void gather_cpudata_list_v2(struct meminfo *);
+static void gather_cpudata_list_v2_nodes(struct meminfo *, int);
 static int check_cpudata_list(struct meminfo *, ulong);
 static int check_shared_list(struct meminfo *, ulong);
 static void gather_slab_cached_count(struct meminfo *);
@@ -355,13 +358,24 @@ vm_init(void)
 			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name");
 			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", 
 				"colour_off");
-			MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+			if (MEMBER_EXISTS("kmem_cache", "objsize"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
 				"objsize");
+			else if (MEMBER_EXISTS("kmem_cache", "buffer_size"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+				"buffer_size");
 			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags");
 			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
 				"kmem_cache", "gfporder");
 
-			MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			if (MEMBER_EXISTS("kmem_cache", "lists"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			else if (MEMBER_EXISTS("kmem_cache", "nodelists")) {
+				vt->flags |= PERCPU_KMALLOC_V2_NODES;
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists");
+				ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, 
+					"kmem_cache.nodelists", NULL, 0);
+			}
 			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array");
 			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0);
 		}
@@ -6412,11 +6426,15 @@ max_cpudata_limit(ulong cache, ulong *cp
 	int limit; 
 	ulong max_limit;
 	ulong shared; 
-
+	ulong *start_address;
+
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+		goto kmem_cache_s_array_nodes;
+
 	if (vt->flags & PERCPU_KMALLOC_V2)
 		goto kmem_cache_s_array;
 
         if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
 		*cpus = 0;
 		return 0;
 	}
@@ -6477,6 +6495,47 @@ kmem_cache_s_array:
 	*cpus = i;
 	return max_limit;
 
+kmem_cache_s_array_nodes:
+
+	if (!readmem(cache+OFFSET(kmem_cache_s_array),
+	    KVADDR, &cpudata[0], 
+	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+	    "array cache array", RETURN_ON_ERROR))
+		goto bail_out;
+
+	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i]; i++) {
+		if (!readmem(cpudata[i]+OFFSET(array_cache_limit),
+		    KVADDR, &limit, sizeof(int),
+		    "array cache limit", RETURN_ON_ERROR))
+			goto bail_out;
+		if (limit > max_limit)
+			max_limit = limit;
+	}
+
+	*cpus = i;
+	/*
+	 *  Also consider each node's shared array_cache limit.
+	 */
+	start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+
+	if (VALID_MEMBER(kmem_list3_shared) &&
+	    VALID_MEMBER(kmem_cache_s_lists) &&
+	    readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &start_address[0], 
+	    sizeof(ulong) * vt->kmem_cache_len_nodes, 
+	    "array nodelist array", RETURN_ON_ERROR))
+		for (i = 0; i < vt->kmem_cache_len_nodes && start_address[i]; i++) {
+			if (readmem(start_address[i] + OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
+			    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) &&
+			    readmem(shared + OFFSET(array_cache_limit),
+			    KVADDR, &limit, sizeof(int), "shared array_cache limit",
+			    RETURN_ON_ERROR|QUIET))
+				if (limit > max_limit)
+					max_limit = limit;
+		}
+	FREEBUF(start_address);
+	return max_limit;
+
 bail_out:
 	vt->flags |= KMEM_CACHE_UNAVAIL;
 	error(INFO, "unable to initialize kmem slab cache subsystem\n\n");
@@ -7045,7 +7104,11 @@ dump_kmem_cache_percpu_v2(struct meminfo
 	for (i = 0; i < vt->kmem_max_cpus; i++) 
 		si->cpudata[i] = (ulong *)
 			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
-	si->shared_array_cache = (ulong *)
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+		si->shared_array_cache = (ulong *)
+			GETBUF(vt->kmem_cache_len_nodes * vt->kmem_max_limit * sizeof(ulong));
+	else
+		si->shared_array_cache = (ulong *)
 			GETBUF(vt->kmem_max_limit * sizeof(ulong)); 
 
 	cnt = 0;
@@ -7129,7 +7192,10 @@ dump_kmem_cache_percpu_v2(struct meminfo
                 	"kmem_cache_s num", FAULT_ON_ERROR);
 		si->c_num = (ulong)tmp_val;
 
-		do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
+		if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+			do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si);
+		else
+			do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
 
 		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
 			DUMP_KMEM_CACHE_INFO_V2();
@@ -7143,11 +7209,15 @@ dump_kmem_cache_percpu_v2(struct meminfo
 
 		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
 
+		if (!(vt->flags & PERCPU_KMALLOC_V2_NODES))
 			gather_cpudata_list_v2(si);
 
                         si->slab = (si->flags & ADDRESS_SPECIFIED) ?
                         	vaddr_to_slab(si->spec_addr) : 0;
 
+		if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+			do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si);
+		else
 			do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);
 
 			if (si->found) {
@@ -7836,6 +7906,234 @@ do_slab_chain_percpu_v2(long cmd, struct
 	}
 }
 
+
+/*
+ *  Per-node (kmem_cache.nodelists[]) version of do_slab_chain_percpu_v2().
+ */
+
+static void
+do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si)
+{
+	int i, tmp, s;
+	int list_borked;
+	char *slab_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
+	ulong *start_address;
+	int index;
+
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
+	start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), 
+	    KVADDR, &start_address[0], 
+	    sizeof(ulong) * vt->kmem_cache_len_nodes, 
+	    "array nodelist array", RETURN_ON_ERROR))
+		error(INFO, "cannot read kmem_cache nodelists array\n");
+
+
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+		si->num_slabs = si->inuse = 0;
+		slab_buf = GETBUF(SIZE(slab));
+		for (index = 0; index < vt->kmem_cache_len_nodes &&
+		     start_address[index]; index++) {
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+			slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+
+			gather_cpudata_list_v2_nodes(si, index);
+
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+				fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+					slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+
+				if (!readmem(slab_chains[s],
+				    KVADDR, &si->slab, sizeof(ulong),
+				    "first slab", QUIET|RETURN_ON_ERROR)) {
+					error(INFO, 
+					    "%s: %s list: bad slab pointer: %lx\n",
+						si->curname,
+						slab_chain_name_v2[s],
+						slab_chains[s]);
+					list_borked = 1;
+					continue;
+				}
+
+				if (slab_data_saved(si)) {
+					FREEBUF(slab_buf);
+					FREEBUF(start_address);
+					return;
+				}
+
+				if (si->slab == slab_chains[s]) 
+					continue;
+
+				last = slab_chains[s];
+
+				do {
+					if (received_SIGINT()) {
+						FREEBUF(slab_buf);
+						FREEBUF(start_address);
+						restart(0);
+					}
+
+					if (!verify_slab_v2(si, last, s)) {
+						list_borked = 1;
+						continue;
+					}
+					last = si->slab - OFFSET(slab_list);
+
+					readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+
+					tmp = INT(slab_buf + OFFSET(slab_inuse));
+					si->inuse += tmp;
+
+					if (ACTIVE())
+						gather_cpudata_list_v2_nodes(si, index);
+
+					si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
+					gather_slab_cached_count(si);
+
+					si->num_slabs++;
+
+					si->slab = ULONG(slab_buf + 
+						OFFSET(slab_list));
+					si->slab -= OFFSET(slab_list);
+
+					/*
+					 *  Check for slab transition. (Tony Dziedzic)
+					 */
+					for (i = 0; i < SLAB_CHAINS; i++) {
+						if ((i != s) && 
+						    (si->slab == slab_chains[i])) {
+							error(NOTE, 
+					"%s: slab chain inconsistency: %s list\n",
+								si->curname,
+								slab_chain_name_v2[s]);
+							list_borked = 1;
+						}
+					}
+			
+				} while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		if (!list_borked)
+			save_slab_data(si);
+		break;
+
+	case SLAB_WALKTHROUGH:
+		specified_slab = si->slab;
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
+		slab_buf = GETBUF(SIZE(slab));
+		for (index = 0; index < vt->kmem_cache_len_nodes &&
+		     start_address[index]; index++) {
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+			slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+
+			gather_cpudata_list_v2_nodes(si, index);
+
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+				fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+					slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+
+				if (!specified_slab) {
+					if (!readmem(slab_chains[s],
+					    KVADDR, &si->slab, sizeof(ulong),
+					    "slabs", QUIET|RETURN_ON_ERROR)) {
+						error(INFO,
+						    "%s: %s list: bad slab pointer: %lx\n",
+							si->curname,
+							slab_chain_name_v2[s],
+							slab_chains[s]);
+						list_borked = 1;
+						continue;
+					}
+					last = slab_chains[s];
+				} else
+					last = 0;
+			
+				if (si->slab == slab_chains[s])
+					continue;
+
+				readmem(si->slab, KVADDR, slab_buf, 
+					SIZE(slab), "slab buffer", 
+					FAULT_ON_ERROR);
+
+				si->s_mem = ULONG(slab_buf + 
+					OFFSET(slab_s_mem));
+
+				if (CRASHDEBUG(1)) {
+					fprintf(fp, "search cache: [%s] ", 
+						si->curname);
+					if (si->flags & ADDRESS_SPECIFIED) 
+						fprintf(fp, "for %llx", si->spec_addr);
+					fprintf(fp, "\n");
+				}
+	
+				do {
+					if (received_SIGINT()) {
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+						restart(0);
+					}
+
+					if (!verify_slab_v2(si, last, s)) {
+						list_borked = 1;
+						continue;
+					}
+
+					last = si->slab - OFFSET(slab_list);
+	
+					dump_slab_percpu_v2(si);
+
+					if (si->found) {
+						FREEBUF(start_address);
+						FREEBUF(slab_buf);
+						return;
+					}
+
+					readmem(si->slab+OFFSET(slab_list),
+						KVADDR, &si->slab, sizeof(ulong),
+						"slab list", FAULT_ON_ERROR);
+
+					si->slab -= OFFSET(slab_list);
+
+				} while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		break;
+	}
+	FREEBUF(slab_buf);
+	FREEBUF(start_address);
+}
+
 /*
  *  Try to preclude any attempt to translate a bogus slab structure.
  */
@@ -8757,6 +9055,102 @@ gather_cpudata_list_v2(struct meminfo *s
                 	fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
 }
 
+
+
+/*
+ *  Per-node (nodelists-based) variant of gather_cpudata_list_v2().
+ */
+
+static void
+gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
+{
+	int i, j;
+	int avail;
+	ulong cpudata[NR_CPUS];
+	ulong shared;
+	ulong *start_address;
+
+	start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+	readmem(si->cache+OFFSET(kmem_cache_s_array),
+		KVADDR, &cpudata[0], 
+		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
+		"array_cache array", FAULT_ON_ERROR);
+
+	for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && 
+	     cpudata[i] && !index; i++) {
+		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);
+
+		readmem(cpudata[i]+OFFSET(array_cache_avail),
+			KVADDR, &avail, sizeof(int),
+			"array cache avail", FAULT_ON_ERROR);
+
+		if (!avail) 
+			continue;
+
+		if (avail > vt->kmem_max_limit) {
+			error(INFO, 
+	  	  "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
+				si->curname, avail, vt->kmem_max_limit);
+			si->errors++;
+		}
+
+		if (CRASHDEBUG(2))
+			fprintf(fp, "%s: cpu[%d] avail: %d\n", 
+				si->curname, i, avail);
+
+		readmem(cpudata[i]+SIZE(array_cache),
+			KVADDR, si->cpudata[i],
+			sizeof(void *) * avail,
+			"array_cache avail", FAULT_ON_ERROR);
+
+		if (CRASHDEBUG(2))
+			for (j = 0; j < avail; j++)
+				fprintf(fp, "  %lx (cpu %d)\n", si->cpudata[i][j], i);
+	}
+
+	/*
+	 *  If this node's shared list contains anything, gather it as well.
+	 */
+	if (!index) {
+		BZERO(si->shared_array_cache, sizeof(ulong) * 
+			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
+		si->current_cache_index = 0;
+	}
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists), KVADDR, 
+	    &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, 
+	    "array nodelist array", RETURN_ON_ERROR) ||
+	    !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR,
+	    &shared, sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
+	    !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail,
+	    sizeof(int), "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) {
+		FREEBUF(start_address);
+		return;
+	}
+
+	if (avail > vt->kmem_max_limit) {
+		error(INFO, 
+  	  "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
+			si->curname, avail, vt->kmem_max_limit);
+		si->errors++;
+		FREEBUF(start_address);
+		return;
+	}
+
+	if (CRASHDEBUG(2))
+		fprintf(fp, "%s: shared avail: %d\n", 
+			si->curname, avail);
+
+	readmem(shared+SIZE(array_cache), KVADDR, 
+		si->shared_array_cache + si->current_cache_index,
+		sizeof(void *) * avail, "shared array_cache avail", FAULT_ON_ERROR);
+	if (CRASHDEBUG(2))
+		for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++)
+			fprintf(fp, "  %lx (shared list)\n", si->shared_array_cache[j]);
+
+	si->current_cache_index += avail;
+	FREEBUF(start_address);
+}
+
 /*
  *  Check whether a given address is contained in the previously-gathered
  *  percpu object cache.
@@ -8792,7 +9186,7 @@ check_shared_list(struct meminfo *si, ul
 	    !si->shared_array_cache)
 		return FALSE;
 
-        for (i = 0; i < si->shared_array_cache[i]; i++) {
+        for (i = 0; si->shared_array_cache[i]; i++) {
 		if (si->shared_array_cache[i] == obj)
 			return TRUE;
 	}
@@ -9149,6 +9543,7 @@ dump_vm_table(int verbose)
 	fprintf(fp, "      kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
 	fprintf(fp, "   kmem_cache_count: %ld\n", vt->kmem_cache_count);
 	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
+	fprintf(fp, "kmem_cache_len_nodes: %ld\n", vt->kmem_cache_len_nodes);
 	fprintf(fp, "        PG_reserved: %lx\n", vt->PG_reserved);
 	fprintf(fp, "            PG_slab: %ld\n", vt->PG_slab);
 	fprintf(fp, "        paddr_prlen: %d\n", vt->paddr_prlen);
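
A note on the bookkeeping above: with PERCPU_KMALLOC_V2_NODES set,
every node's kmem_list3 has its own shared array_cache, so
dump_kmem_cache_percpu_v2() now sizes si->shared_array_cache for the
worst case (kmem_cache_len_nodes * kmem_max_limit entries) and
gather_cpudata_list_v2_nodes() packs each node's object pointers end
to end, with si->current_cache_index marking where the next node's
entries begin.  In outline (an illustrative sketch only; "node" and
"avail" are stand-ins for the real locals):

    /* one flat buffer holds every node's shared object pointers */
    si->shared_array_cache = (ulong *)GETBUF(vt->kmem_cache_len_nodes *
            vt->kmem_max_limit * sizeof(ulong));
    si->current_cache_index = 0;            /* reset while on node 0 */

    for (node = 0; node < vt->kmem_cache_len_nodes; node++) {
            /* gather_cpudata_list_v2_nodes(si, node) copies that node's
             * kmem_list3.shared->avail pointers to
             * si->shared_array_cache + si->current_cache_index ... */
            si->current_cache_index += avail;   /* ... then advances */
    }

Because the buffer is BZERO'd up front and kernel object addresses are
never zero, the first zero entry is what terminates the scan in
check_shared_list().
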
--- crash-4.0-2.22.old/defs.h	2006-03-24 02:21:06.000000000 +0530
+++ crash-4.0-2.22.new/defs.h	2006-04-07 21:21:49.103517736 +0530
@@ -1485,6 +1485,7 @@ struct vm_table {                /* kern
 	ulong kmem_max_limit;
 	ulong kmem_max_cpus;
 	ulong kmem_cache_count;
+	ulong kmem_cache_len_nodes;
 	ulong PG_reserved;
 	ulong PG_slab;
 	int kmem_cache_namelen;
@@ -1521,6 +1522,7 @@ struct vm_table {                /* kern
 #define DISCONTIGMEM		(0x200)
 #define SPARSEMEM		(0x400)
 #define SPARSEMEM_EX		(0x800)
+#define PERCPU_KMALLOC_V2_NODES	(0x1000)
 
 #define IS_FLATMEM()		(vt->flags & FLATMEM)
 #define IS_DISCONTIGMEM()	(vt->flags & DISCONTIGMEM)

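
For a quick sanity check after applying this: if I am reading the stock
code right, "help -v" dumps the vm_table, so the new per-node length
should show up there (the value below is from a hypothetical two-node
box):

    crash> help -v | grep kmem_cache_len_nodes
    kmem_cache_len_nodes: 2

and "kmem -s" should once again walk every node's partial/full/free
slab chains instead of failing on the missing kmem_cache.lists member.
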