[Crash-utility] Test patch to fix kmem -s bug

Hi,
	This is a test patch that fixes the "kmem -s" failure caused by
changes to the kernel's kmem_cache structure (the "objsize" member is now
"buffer_size", and the embedded "lists" member has been replaced by a
per-node "nodelists" array). It still needs testing to validate the
results on both single-node and multi-node (NUMA) machines.
	Please go through it and let me know your opinions.
Thanks
Sharyathi Nagesh
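
For reference, the kernel-side change this patch keys off is (roughly) the
slab allocator becoming NUMA-aware: "objsize" became "buffer_size", and the
single embedded "lists" member became a per-node "nodelists" array. The
sketch below only illustrates that difference; the member names come from
the patch, but the surrounding fields, the array sizes and the struct names
kmem_cache_old/kmem_cache_new are placeholders, not the real kernel
definitions:

/* Illustrative sketch only, NOT the actual kernel definitions. */

#define NR_CPUS		8	/* placeholder values for illustration */
#define MAX_NUMNODES	4

struct array_cache;			/* opaque for this sketch */
struct list_head { struct list_head *next, *prev; };

struct kmem_list3 {			/* only the members crash reads */
	struct list_head	slabs_partial;
	struct list_head	slabs_full;
	struct list_head	slabs_free;
	struct array_cache	*shared;
	/* ... */
};

/* Older slab allocator: one embedded list set, object size in "objsize". */
struct kmem_cache_old {
	struct array_cache	*array[NR_CPUS];
	unsigned int		objsize;
	struct kmem_list3	lists;
	/* ... */
};

/* NUMA-aware slab allocator: one kmem_list3 pointer per node,
 * object size renamed to "buffer_size". */
struct kmem_cache_new {
	struct array_cache	*array[NR_CPUS];
	unsigned int		buffer_size;
	struct kmem_list3	*nodelists[MAX_NUMNODES];
	/* ... */
};

crash itself only needs the member names and offsets, which is why vm_init()
below keys off MEMBER_EXISTS() and sets PERCPU_KMALLOC_V2_NODES when
"nodelists" is present.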


--- crash-4.0-2.22.old/defs.h	2006-03-24 02:21:06.000000000 +0530
+++ crash-4.0-2.22.new/defs.h	2006-03-29 22:49:10.000000000 +0530
@@ -1485,6 +1485,7 @@ struct vm_table {                /* kern
 	ulong kmem_max_limit;
 	ulong kmem_max_cpus;
 	ulong kmem_cache_count;
+	ulong kmem_cache_len_nodes;
 	ulong PG_reserved;
 	ulong PG_slab;
 	int kmem_cache_namelen;
@@ -1521,6 +1522,7 @@ struct vm_table {                /* kern
 #define DISCONTIGMEM		(0x200)
 #define SPARSEMEM		(0x400)
 #define SPARSEMEM_EX		(0x800)
+#define PERCPU_KMALLOC_V2_NODES   (0x1000)
 
 #define IS_FLATMEM()		(vt->flags & FLATMEM)
 #define IS_DISCONTIGMEM()	(vt->flags & DISCONTIGMEM)
--- crash-4.0-2.22.old/memory.c	2006-03-24 02:21:05.000000000 +0530
+++ crash-4.0-2.22.new/memory.c	2006-04-04 01:00:55.661628248 +0530
@@ -90,6 +90,7 @@ static ulong vaddr_to_slab(ulong);
 static void do_slab_chain(int, struct meminfo *);
 static void do_slab_chain_percpu_v1(long, struct meminfo *);
 static void do_slab_chain_percpu_v2(long, struct meminfo *);
+static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *);
 static void save_slab_data(struct meminfo *);
 static int slab_data_saved(struct meminfo *);
 static void dump_saved_slab_data(void);
@@ -355,13 +356,24 @@ vm_init(void)
 			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name");
 			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", 
 				"colour_off");
-			MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+			if (MEMBER_EXISTS("kmem_cache", "objsize"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
 				"objsize");
+			else if (MEMBER_EXISTS("kmem_cache", "buffer_size"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_objsize,  "kmem_cache", 
+				"buffer_size");
 			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags");
 			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder,  
 				"kmem_cache", "gfporder");
 
-			MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			if (MEMBER_EXISTS("kmem_cache", "lists"))
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
+			else if (MEMBER_EXISTS("kmem_cache", "nodelists"))
+			{
+				vt->flags |= PERCPU_KMALLOC_V2_NODES;
+				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "nodelists");
+				ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, "kmem_cache.nodelists", NULL, 0);
+			}
 			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array");
 			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0);
 		}
@@ -7129,7 +7141,10 @@ dump_kmem_cache_percpu_v2(struct meminfo
                 	"kmem_cache_s num", FAULT_ON_ERROR);
 		si->c_num = (ulong)tmp_val;
 
-		do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
+		if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+			do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si);
+		else
+			do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);
 
 		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
 			DUMP_KMEM_CACHE_INFO_V2();
@@ -7148,6 +7163,9 @@ dump_kmem_cache_percpu_v2(struct meminfo
                         si->slab = (si->flags & ADDRESS_SPECIFIED) ?
                         	vaddr_to_slab(si->spec_addr) : 0;
 
+		if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+			do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si);
+		else
 			do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);
 
 			if (si->found) {
@@ -7836,6 +7854,210 @@ do_slab_chain_percpu_v2(long cmd, struct
 	}
 }
 
+/* Node-aware version of do_slab_chain_percpu_v2(): traverses kmem_cache.nodelists[]. */
+static void
+do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si)
+{
+	int i, tmp, s;
+	int list_borked;
+	char *slab_buf;
+	ulong specified_slab;
+	ulong last;
+	ulong slab_chains[SLAB_CHAINS];
+	ulong *start_address;
+	int index;
+
+	list_borked = 0;
+	si->slabsize = (power(2, si->order) * PAGESIZE());
+	si->cpucached_slab = 0;
+	start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+
+	if (!readmem(si->cache+OFFSET(kmem_cache_s_lists),
+		      KVADDR, &start_address[0],
+		      sizeof(ulong) * vt->kmem_cache_len_nodes,
+		      "kmem_cache nodelists", RETURN_ON_ERROR))
+		error(INFO, "cannot read kmem_cache nodelists\n");
+
+	switch (cmd)
+	{
+	case SLAB_GET_COUNTS:
+		si->flags |= SLAB_GET_COUNTS;
+		si->flags &= ~SLAB_WALKTHROUGH;
+		si->cpucached_cache = 0;
+        	si->num_slabs = si->inuse = 0;
+		gather_cpudata_list_v2(si); 
+		slab_buf = GETBUF(SIZE(slab));
+
+		for (index = 0; index < vt->kmem_cache_len_nodes && start_address[index]; index++)
+		{
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+			slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+				fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+					slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+	
+		                if (!readmem(slab_chains[s],
+	        	            KVADDR, &si->slab, sizeof(ulong),
+	                	    "first slab", QUIET|RETURN_ON_ERROR)) {
+	                                error(INFO, 
+					    "%s: %s list: bad slab pointer: %lx\n",
+                	                        si->curname,
+						slab_chain_name_v2[s],
+                                	        slab_chains[s]);
+					list_borked = 1;
+					continue;
+				}
+	
+				if (slab_data_saved(si)) {
+					FREEBUF(slab_buf);
+					return;
+				}
+			
+				if (si->slab == slab_chains[s]) 
+					continue;
+	
+				last = slab_chains[s];
+
+				do {
+	        	                if (received_SIGINT()) {
+						FREEBUF(slab_buf);
+	                        	        restart(0);
+					}
+
+					if (!verify_slab_v2(si, last, s)) {
+						list_borked = 1;
+						continue;
+						}
+					last = si->slab - OFFSET(slab_list);
+		
+		        	        readmem(si->slab, KVADDR, slab_buf, 
+						SIZE(slab), "slab buffer", 
+						FAULT_ON_ERROR);
+		
+					tmp = INT(slab_buf + OFFSET(slab_inuse));
+					si->inuse += tmp;
+	
+					if (ACTIVE())
+						gather_cpudata_list_v2(si); 
+
+					si->s_mem = ULONG(slab_buf + 
+						OFFSET(slab_s_mem));
+					gather_slab_cached_count(si);
+	
+					si->num_slabs++;
+		
+					si->slab = ULONG(slab_buf + 
+						OFFSET(slab_list));
+					si->slab -= OFFSET(slab_list);
+
+				/*
+				 *  Check for slab transition. (Tony Dziedzic)
+				 */
+					for (i = 0; i < SLAB_CHAINS; i++) {
+     						if ((i != s) && 
+						    (si->slab == slab_chains[i])) {
+       							error(NOTE, 
+		  	                      "%s: slab chain inconsistency: %s list\n",
+								si->curname,
+								slab_chain_name_v2[s]);
+       							list_borked = 1;
+     						}
+					}
+			
+				} while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		FREEBUF(slab_buf);
+		if (!list_borked)
+			save_slab_data(si);
+		break;
+
+	case SLAB_WALKTHROUGH:
+	//	specified_slab = si->slab;     Commented for 
+		si->flags |= SLAB_WALKTHROUGH;
+		si->flags &= ~SLAB_GET_COUNTS;
+		for (index = 0; index < vt->kmem_cache_len_nodes && start_address[index]; index++)
+		{
+			slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial);
+			slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full);
+			slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free);
+
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+				fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+					slab_chains[0], slab_chains[1], slab_chains[2]);
+			}
+
+
+			for (s = 0; s < SLAB_CHAINS; s++) {
+				if (!slab_chains[s])
+					continue;
+
+	//        	if (!specified_slab) {
+	                	if (!readmem(slab_chains[s],
+	       	                    KVADDR, &si->slab, sizeof(ulong),
+	               	            "slabs", QUIET|RETURN_ON_ERROR)) {
+                               	        error(INFO,
+	                                        "%s: %s list: bad slab pointer: %lx\n",
+                                                si->curname,
+						slab_chain_name_v2[s],
+                       	                        slab_chains[s]);
+						list_borked = 1;
+						continue;
+					}
+					last = slab_chains[s];
+	//			} else
+	//				last = 0;
+			
+				if (si->slab == slab_chains[s])
+					continue;
+	
+				if (CRASHDEBUG(1)) {
+					fprintf(fp, "search cache: [%s] ", si->curname);
+					if (si->flags & ADDRESS_SPECIFIED) 
+						fprintf(fp, "for %llx", si->spec_addr);
+					fprintf(fp, "\n");
+				}
+	
+			        do {
+		                        if (received_SIGINT())
+	        	                        restart(0);
+	
+                        	        if (!verify_slab_v2(si, last, s)) {
+                                	        list_borked = 1;
+                                        	continue;
+	                                }
+        	                        last = si->slab - OFFSET(slab_list);
+	
+			                dump_slab_percpu_v2(si);
+		
+			                if (si->found) {
+						return;
+					}
+		
+			                readmem(si->slab+OFFSET(slab_list),
+			                        KVADDR, &si->slab, sizeof(ulong),
+			                        "slab list", FAULT_ON_ERROR);
+			
+					si->slab -= OFFSET(slab_list);
+	
+			        } while (si->slab != slab_chains[s] && !list_borked);
+			}
+		}
+
+		break;
+	}
+	FREEBUF(start_address);
+}
 /*
  *  Try to preclude any attempt to translate a bogus slab structure.
  */
@@ -8683,6 +8911,7 @@ gather_cpudata_list_v2(struct meminfo *s
 	int avail;
         ulong cpudata[NR_CPUS];
 	ulong shared;
+	ulong *start_address, temp = 0, index = 0;
 
         readmem(si->cache+OFFSET(kmem_cache_s_array),
                 KVADDR, &cpudata[0], 
@@ -8726,7 +8955,32 @@ gather_cpudata_list_v2(struct meminfo *s
          */
 	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);
 
-        if (!VALID_MEMBER(kmem_list3_shared) ||
+
+	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
+	{
+	        start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);
+		if (!readmem(si->cache+OFFSET(kmem_cache_s_lists),
+			      KVADDR, &start_address[0],
+			      sizeof(ulong) * vt->kmem_cache_len_nodes,
+			      "kmem_cache nodelists", RETURN_ON_ERROR))
+			error(INFO, "cannot read kmem_cache nodelists\n");
+	
+		for (index = 0, avail = 0; index < vt->kmem_cache_len_nodes && start_address[index]; index++)
+		{
+			if (!VALID_MEMBER(kmem_list3_shared) ||
+			    !VALID_MEMBER(kmem_cache_s_lists) ||
+			    !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
+			    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
+			    !readmem(shared + OFFSET(array_cache_avail),
+			    KVADDR, &temp, sizeof(int), "shared array_cache avail",
+			    RETURN_ON_ERROR|QUIET) || !temp)
+				return;
+			avail += temp;
+		}
+
+	}
+	else {
+	 if (!VALID_MEMBER(kmem_list3_shared) ||
             !VALID_MEMBER(kmem_cache_s_lists) ||
             !readmem(si->cache+OFFSET(kmem_cache_s_lists)+
        	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
@@ -8735,7 +8989,7 @@ gather_cpudata_list_v2(struct meminfo *s
             KVADDR, &avail, sizeof(int), "shared array_cache avail",
             RETURN_ON_ERROR|QUIET) || !avail)
 		return;
-
+	}
 	if (avail > vt->kmem_max_limit) {
 		error(INFO, 
   	  "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",

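To make the new traversal easier to review, here is a small stand-alone
sketch of the loop structure used by do_slab_chain_percpu_v2_nodes() and the
nodes branch of gather_cpudata_list_v2(): walk kmem_cache.nodelists[] up to
the first NULL entry, derive the three per-node slab list heads by offset,
and walk each circular list. Everything in it (walk_nodes, the struct
layouts, the MAX_NUMNODES value) is hypothetical scaffolding for
illustration; it compiles on its own and is not crash or kernel code.

/* Simplified sketch only; hypothetical types, no crash/kernel internals. */
#include <stdio.h>
#include <stddef.h>

#define MAX_NUMNODES	4		/* stands in for kmem_cache_len_nodes */
#define SLAB_CHAINS	3		/* partial, full, free */

struct list_head { struct list_head *next, *prev; };

struct kmem_list3 {			/* only the members the walk needs */
	struct list_head slabs_partial;
	struct list_head slabs_full;
	struct list_head slabs_free;
};

/* Mirror of the patch's loop structure: iterate nodelists[] until the
 * first NULL entry, build the three chain heads from per-member offsets,
 * and walk each circular list. */
static void walk_nodes(struct kmem_list3 *nodelists[], int len_nodes)
{
	static const size_t chain_off[SLAB_CHAINS] = {
		offsetof(struct kmem_list3, slabs_partial),
		offsetof(struct kmem_list3, slabs_full),
		offsetof(struct kmem_list3, slabs_free),
	};
	int index, s;

	for (index = 0; index < len_nodes && nodelists[index]; index++) {
		for (s = 0; s < SLAB_CHAINS; s++) {
			struct list_head *head = (struct list_head *)
				((char *)nodelists[index] + chain_off[s]);
			struct list_head *p;

			/* the real code reads each struct slab with readmem()
			 * and tallies slab_inuse, s_mem, etc. per element */
			for (p = head->next; p != head; p = p->next)
				printf("node %d chain %d: slab at %p\n",
					index, s, (void *)p);
		}
	}
}

int main(void)
{
	struct list_head one_slab;	/* stands in for a slab's list member */
	struct kmem_list3 node0;
	struct kmem_list3 *nodelists[MAX_NUMNODES] = { &node0 };

	/* partial list has one element; full and free lists are empty */
	node0.slabs_partial.next = &one_slab;
	node0.slabs_partial.prev = &one_slab;
	one_slab.next = &node0.slabs_partial;
	one_slab.prev = &node0.slabs_partial;
	node0.slabs_full.next = node0.slabs_full.prev = &node0.slabs_full;
	node0.slabs_free.next = node0.slabs_free.prev = &node0.slabs_free;

	walk_nodes(nodelists, MAX_NUMNODES);
	return 0;
}

The real function does the same walk with readmem() against the dumpfile,
tallying slab_inuse, s_mem and the cpucached counts per slab instead of
printing.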