Re: [PATCH] runq: make tasks in throttled cfs_rqs/rt_rqs displayed

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hello Dave,

于 2012年10月26日 02:26, Dave Anderson 写道:
> 
> 
> ----- Original Message -----
>> Hello Dave,
>>
>> Sorry about not testing the patch fully enough. And I think we
>> should make a discussion about the first patch. I have done some
>> tests with the patch, and I attached it. So could you please test
>> it in your box again.
> 
> 
> Hello Zhang,
> 
> I applied only your new patch1 (the old patch2 no longer applies
> after this new patch1), and I see this:
> 
>  $ make warn
>   ...
>   cc -c -g -DX86_64  -DGDB_7_3_1  task.c -Wall -O2 -Wstrict-prototypes -Wmissing-prototypes -fstack-protector 
>   task.c: In function ‘dump_CFS_runqueues’:
>   task.c:7693:6: warning: variable 'tot' set but not used [-Wunused-but-set-variable]
>   ...
>  

Very sorry about this, it is strange that I didn't find it.

> And I still (always) see the same problem with a live kernel:
> 
>  crash> set
>      PID: 25998
>  COMMAND: "crash"
>     TASK: ffff88020fd9dc40  [THREAD_INFO: ffff88017b6d2000]
>      CPU: 2
>    STATE: TASK_RUNNING (ACTIVE)
>  crash> runq
>  CPU 0 RUNQUEUE: ffff88021e213cc0
>    CURRENT: PID: 0      TASK: ffffffff81c13420  COMMAND: "swapper/0"
>    RT PRIO_ARRAY: ffff88021e213e28
>       [no tasks queued]
>    CFS RB_ROOT: ffff88021e213d58
>       GROUP CFS RB_ROOT: ffff88020ec3b800runq: invalid kernel virtual address: 48  type: "cgroup dentry"
>  crash>
> 
> I also still see numerous instances of the error above with some 
> (but not all) of my "snapshot" dumpfiles, where your dump_task_group_name()
> function is encountering (and trying to use) a NULL cgroup address here:
> 

I tested the patch on RHEL7.0ALPHA2, and I think it is the autogroup thing.
So I added the cgroup check in the patch; if cgroup is 0, function dump_task_group_name will just return.


>  static void
>  dump_task_group_name(ulong group)
>  {
>          ulong cgroup, dentry, name;
>          char *dentry_buf;
>          int len;
>          char tmp_buf[100];
>  
>          readmem(group + OFFSET(task_group_css) + OFFSET(cgroup_subsys_state_cgroup),
>                  KVADDR, &cgroup, sizeof(ulong),
>                  "task_group css cgroup", FAULT_ON_ERROR);
>          readmem(cgroup + OFFSET(cgroup_dentry), KVADDR, &dentry, sizeof(ulong),
>                  "cgroup dentry", FAULT_ON_ERROR);
>   
> Here are the examples, where it always happens on the "crash" process while
> it's performing the snapshot file creation:
> 
> 2.6.38.2-9.fc15 snapshot:
>  
>  crash> runq
>  CPU 0 RUNQUEUE: ffff88003fc13840
>    CURRENT: PID: 1180   TASK: ffff88003bea2e40  COMMAND: "crash"
>    RT PRIO_ARRAY: ffff88003fc13988
>       [no tasks queued]
>    CFS RB_ROOT: ffff88003fc138d8
>       GROUP CFS RB_ROOT: ffff880037ef1b00runq: invalid kernel virtual address: 38  type: "cgroup dentry"
>  crash>
>  
>  
> 2.6.40.4-5.fc15 snapshot:
>  
>  crash> runq
>  ...
>  CPU 1 RUNQUEUE: ffff88003fc92540
>    CURRENT: PID: 1341   TASK: ffff880037409730  COMMAND: "crash"
>    RT PRIO_ARRAY: ffff88003fc92690
>       [no tasks queued]
>    CFS RB_ROOT: ffff88003fc925d8
>       GROUP CFS RB_ROOT: ffff880037592f00runq: invalid kernel virtual address: 38  type: "cgroup dentry"
>  crash>
>  
> 3.5.1-1.fc17 snapshot:
>  
>  crash> runq
>  ...
>  CPU 1 RUNQUEUE: ffff88003ed13800
>    CURRENT: PID: 31736  TASK: ffff88007c46ae20  COMMAND: "crash"
>    RT PRIO_ARRAY: ffff88003ed13968
>       [no tasks queued]
>    CFS RB_ROOT: ffff88003ed13898
>       GROUP CFS RB_ROOT: ffff88003deb3000runq: invalid kernel virtual address: 48  type: "cgroup dentry"
>  crash>
>  
> 3.1.7-1.fc16 snapshot:
>  
>  crash> runq
>  ...
>  CPU 2 RUNQUEUE: ffff88003e253180
>    CURRENT: PID: 1495   TASK: ffff880037a60000  COMMAND: "crash"
>    RT PRIO_ARRAY: ffff88003e2532d0
>       [no tasks queued]
>    CFS RB_ROOT: ffff88003e253218
>       GROUP CFS RB_ROOT: ffff8800277f8500runq: invalid kernel virtual address: 38  type: "cgroup dentry"
>  crash>
> 
> 3.2.6-3.fc16 snapshot:
> 
>  crash> runq
>  ...
>  CPU 0 RUNQUEUE: ffff88003fc13780
>    CURRENT: PID: 1383   TASK: ffff88003c932e40  COMMAND: "crash"
>    RT PRIO_ARRAY: ffff88003fc13910
>       [no tasks queued]
>    CFS RB_ROOT: ffff88003fc13820
>       GROUP CFS RB_ROOT: ffff88003a432c00runq: invalid kernel virtual address: 38  type: "cgroup dentry"
>  crash>
> 
> But I also saw the error above on this 3.2.1-0.8.el7.x86_64 kernel
> that actually crashed:
> 
>  crash> runq
>  ...
>  CPU 3 RUNQUEUE: ffff8804271d43c0
>    CURRENT: PID: 11615  TASK: ffff88020c50a670  COMMAND: "runtest.sh"
>    RT PRIO_ARRAY: ffff8804271d4590
>       [no tasks queued]
>    CFS RB_ROOT: ffff8804271d44a0
>       GROUP CFS RB_ROOT: ffff88041e0d2760runq: invalid kernel virtual address: 38  type: "cgroup dentry"
>  crash>
> 
> 
>> will be fixed in patch2 later.
> 
> With respect to your patch2:
> 
>  +#define MAX_THROTTLED_RQ 100
>  +struct throttled_rq {
>  +       ulong rq;
>  +       int depth;
>  +       int prio;
>  +};
>  +static struct throttled_rq throttled_rt_rq_array[MAX_THROTTLED_RQ];
>  +static struct throttled_rq throttled_cfs_rq_array[MAX_THROTTLED_RQ];
> 
> Can you please dynamically allocate the throttled_rt_rq_array and 
> throttled_cfs_rq_array arrays with GETBUF(), perhaps in the 
> task_group_offset_init() function?  They are only needed when
> "runq" is executed, and then only if the kernel version supports
> them.  You can FREEBUF() them at the bottom of dump_CFS_runqueues(),
> and if the command fails prematurely, they will be FREEBUF()'d 
> automatically by restore_sanity().

OK.

> 
> But this leads to the larger question of showing the task_group
> data.  Consider that the current "runq" command does what it says
> it does: 
> 
>  crash> help runq
>  NAME
>    runq - run queue
>  
>  SYNOPSIS
>    runq [-t]
>  
>  DESCRIPTION
>    With no argument, this command displays the tasks on the run queues
>    of each cpu.
>   
>     -t  Display the timestamp information of each cpu's runqueue, which is the
>         rq.clock, rq.most_recent_timestamp or rq.timestamp_last_tick value,
>         whichever applies; following each cpu timestamp is the last_run or 
>         timestamp value of the active task on that cpu, whichever applies, 
>         along with the task identification.
>    ...
>   
> Now, your patch adds significant complexity to the runq handling code
> and to its future maintainability.  I'm wondering whether your patch
> can be modified such that the task_group info would only be displayed
> via a new flag, let's say "runq -g".  It seems that there has been 
> considerable churn in the kernel code in this area, and it worries me 
> that this patch will potentially and unnecessarily cause the breakage 
> of the simple display of the queued tasks. 
> 

Currently, rt_rq is displayed hierarchically, while the cfs_rq is not displayed
like that. So I made the first patch to make tasks in cfs_rq displayed
hierarchically so that we could see which task belongs to which cfs_rq
easily, just like rt_rq.

The second patch is used to display tasks in throttled cfs_rqs/rt_rqs.
To display tasks in throttled cfs_rqs/rt_rqs is easy, but to display them
sorted in the current runqueue is kind of difficult, so patch2 looks
complex. I think I will implement patch2 in two ways: one is just the fixed
version of the current patch2, the other is to use the new flag '-g'.
You can decide which one to apply.

The attachment is the patch1.

Thanks
Zhang

>From 7991a5f73cd5ce1c11b89247567a05673e70e387 Mon Sep 17 00:00:00 2001
From: Zhang Yanfei <zhangyanfei@xxxxxxxxxxxxxx>
Date: Fri, 26 Oct 2012 15:45:57 +0800
Subject: [PATCH] runq: make tasks in cfs_rq displayed hierarchically

Signed-off-by: Zhang Yanfei <zhangyanfei@xxxxxxxxxxxxxx>
---
 defs.h    |   11 ++++++
 symbols.c |   22 +++++++++++
 task.c    |  116 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 134 insertions(+), 15 deletions(-)

diff --git a/defs.h b/defs.h
index 319584f..ce4e35e 100755
--- a/defs.h
+++ b/defs.h
@@ -1792,6 +1792,16 @@ struct offset_table {                    /* stash of commonly-used offsets */
 	long sched_rt_entity_my_q;
 	long neigh_table_hash_shift;
 	long neigh_table_nht_ptr;
+	long task_group_css;
+	long cgroup_subsys_state_cgroup;
+	long cgroup_dentry;
+	long task_group_rt_rq;
+	long cfs_rq_tg;
+	long task_group_cfs_rq;
+	long rt_rq_tg;
+	long task_group_parent;
+	long task_group_siblings;
+	long task_group_children;
 };
 
 struct size_table {         /* stash of commonly-used sizes */
@@ -1927,6 +1937,7 @@ struct size_table {         /* stash of commonly-used sizes */
 	long log;
 	long log_level;
 	long rt_rq;
+	long task_group;
 };
 
 struct array_table {
diff --git a/symbols.c b/symbols.c
index 1f09c9f..1127e3b 100755
--- a/symbols.c
+++ b/symbols.c
@@ -8820,6 +8820,26 @@ dump_offset_table(char *spec, ulong makestruct)
 		OFFSET(log_flags_level));
 	fprintf(fp, "          sched_rt_entity_my_q: %ld\n",
 		OFFSET(sched_rt_entity_my_q));
+	fprintf(fp, "                task_group_css: %ld\n",
+		OFFSET(task_group_css));
+	fprintf(fp, "    cgroup_subsys_state_cgroup: %ld\n",
+		OFFSET(cgroup_subsys_state_cgroup));
+	fprintf(fp, "                 cgroup_dentry: %ld\n",
+		OFFSET(cgroup_dentry));
+	fprintf(fp, "              task_group_rt_rq: %ld\n",
+		OFFSET(task_group_rt_rq));
+	fprintf(fp, "                      rt_rq_tg: %ld\n",
+		OFFSET(rt_rq_tg));
+	fprintf(fp, "             task_group_cfs_rq: %ld\n",
+		OFFSET(task_group_cfs_rq));
+	fprintf(fp, "                     cfs_rq_tg: %ld\n",
+		OFFSET(cfs_rq_tg));
+	fprintf(fp, "             task_group_parent: %ld\n",
+		OFFSET(task_group_parent));
+	fprintf(fp, "           task_group_siblings: %ld\n",
+		OFFSET(task_group_siblings));
+	fprintf(fp, "           task_group_children: %ld\n",
+		OFFSET(task_group_children));
 
 	fprintf(fp, "\n                    size_table:\n");
 	fprintf(fp, "                          page: %ld\n", SIZE(page));
@@ -9037,6 +9057,8 @@ dump_offset_table(char *spec, ulong makestruct)
 		SIZE(log_level));
 	fprintf(fp, "                         rt_rq: %ld\n",
 		SIZE(rt_rq));
+	fprintf(fp, "                    task_group: %ld\n",
+		SIZE(task_group));
 
         fprintf(fp, "\n                   array_table:\n");
 	/*
diff --git a/task.c b/task.c
index f8c6325..c77d0a9 100755
--- a/task.c
+++ b/task.c
@@ -64,7 +64,7 @@ static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
 static void dump_task_runq_entry(struct task_context *);
-static int dump_tasks_in_cfs_rq(ulong);
+static int dump_tasks_in_cfs_rq(int, ulong);
 static void dump_on_rq_tasks(void);
 static void dump_CFS_runqueues(void);
 static void dump_RT_prio_array(int, ulong, char *);
@@ -7422,28 +7422,68 @@ rb_next(struct rb_node *node)
 }
 
 static void
+dump_task_group_name(ulong group)
+{
+	ulong cgroup, dentry, name;
+	char *dentry_buf;
+	int len;
+	char tmp_buf[100];
+
+	readmem(group + OFFSET(task_group_css) + OFFSET(cgroup_subsys_state_cgroup),
+		KVADDR, &cgroup, sizeof(ulong),
+		"task_group css cgroup", FAULT_ON_ERROR);
+	if (cgroup == 0)
+		return;
+
+	readmem(cgroup + OFFSET(cgroup_dentry), KVADDR, &dentry, sizeof(ulong),
+		"cgroup dentry", FAULT_ON_ERROR);
+
+	dentry_buf = GETBUF(SIZE(dentry));
+	readmem(dentry, KVADDR, dentry_buf, SIZE(dentry),
+		"dentry", FAULT_ON_ERROR);
+	len = UINT(dentry_buf + OFFSET(dentry_d_name) + OFFSET(qstr_len));
+	name = ULONG(dentry_buf + OFFSET(dentry_d_name) + OFFSET(qstr_name));
+	BZERO(tmp_buf, 100);
+	readmem(name, KVADDR, tmp_buf, len, "qstr name", FAULT_ON_ERROR);
+	fprintf(fp, " <%s> ", tmp_buf);
+}
+
+static void
 dump_task_runq_entry(struct task_context *tc)
 {
 	int prio;
 
 	readmem(tc->task + OFFSET(task_struct_prio), KVADDR, 
 		&prio, sizeof(int), "task prio", FAULT_ON_ERROR);
-	fprintf(fp, "     [%3d] ", prio);
+	fprintf(fp, "[%3d] ", prio);
 	fprintf(fp, "PID: %-5ld  TASK: %lx  COMMAND: \"%s\"\n",
 		tc->pid, tc->task, tc->comm);
 }
 
 static int
-dump_tasks_in_cfs_rq(ulong cfs_rq)
+dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 {
 	struct task_context *tc;
 	struct rb_root *root;
 	struct rb_node *node;
 	ulong my_q, leftmost, curr, curr_my_q;
 	int total;
+	ulong tmp;
 
 	total = 0;
 
+	if (depth) {
+		INDENT(2 + 3 * depth);
+		fprintf(fp, "GROUP CFS RB_ROOT: %lx", cfs_rq);
+		if (VALID_MEMBER(cfs_rq_tg) && VALID_MEMBER(task_group_css)) {
+			readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+				&tmp, sizeof(ulong), "cfs_rq tg",
+				FAULT_ON_ERROR);
+			dump_task_group_name(tmp);
+		}
+		fprintf(fp, "\n");
+	}
+
 	if (VALID_MEMBER(sched_entity_my_q)) {
 		readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr, 
 			sizeof(ulong), "curr", FAULT_ON_ERROR);
@@ -7451,8 +7491,11 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 			readmem(curr + OFFSET(sched_entity_my_q), KVADDR, 
 				&curr_my_q, sizeof(ulong), "curr->my_q", 
 				FAULT_ON_ERROR);
-			if (curr_my_q)
-				total += dump_tasks_in_cfs_rq(curr_my_q);
+			if (curr_my_q) {
+				total++;
+				total += dump_tasks_in_cfs_rq(depth + 1,
+						curr_my_q);
+			}
 		}
 	}
 
@@ -7466,7 +7509,8 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 				+ OFFSET(sched_entity_my_q), KVADDR, &my_q,
 				sizeof(ulong), "my_q", FAULT_ON_ERROR);
 			if (my_q) {
-				total += dump_tasks_in_cfs_rq(my_q);
+				total++;
+				total += dump_tasks_in_cfs_rq(depth + 1, my_q);
 				continue;
 			}
 		}
@@ -7475,9 +7519,10 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 				     OFFSET(sched_entity_run_node));
 		if (!tc)
 			continue;
-		if (hq_enter((ulong)tc))
+		if (hq_enter((ulong)tc)) {
+			INDENT(5 + 3 * depth);
 			dump_task_runq_entry(tc);
-		else {
+		} else {
 			error(WARNING, "duplicate CFS runqueue node: task %lx\n",
 				tc->task);
 			return total;
@@ -7485,10 +7530,42 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 		total++;
 	}
 
+	if (!total) {
+		INDENT(5 + 3 * depth);
+		fprintf(fp, "[no tasks queued]\n");
+	}
+
 	return total;
 }
 
 static void
+task_group_offset_init(void)
+{
+	STRUCT_SIZE_INIT(task_group, "task_group");
+	if (MEMBER_EXISTS("task_group", "css")) {
+		MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
+		MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup, "cgroup_subsys_state", "cgroup");
+		MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
+	}
+
+	if (MEMBER_EXISTS("task_group", "rt_rq")) {
+		MEMBER_OFFSET_INIT(task_group_rt_rq, "task_group", "rt_rq");
+		MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
+	}
+
+	if (MEMBER_EXISTS("task_group", "cfs_rq")) {
+		MEMBER_OFFSET_INIT(task_group_cfs_rq, "task_group", "cfs_rq");
+		MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
+	}
+
+	if (MEMBER_EXISTS("task_group", "parent")) {
+		MEMBER_OFFSET_INIT(task_group_parent, "task_group", "parent");
+		MEMBER_OFFSET_INIT(task_group_siblings, "task_group", "siblings");
+		MEMBER_OFFSET_INIT(task_group_children, "task_group", "children");
+	}
+}
+
+static void
 dump_on_rq_tasks(void)
 {
 	char buf[BUFSIZE];
@@ -7545,7 +7622,7 @@ dump_on_rq_tasks(void)
 static void
 dump_CFS_runqueues(void)
 {
-	int tot, cpu;
+	int cpu;
 	ulong runq, cfs_rq;
 	char *runqbuf, *cfs_rq_buf;
 	ulong tasks_timeline ATTRIBUTE_UNUSED;
@@ -7586,6 +7663,9 @@ dump_CFS_runqueues(void)
 		MEMBER_OFFSET_INIT(rt_prio_array_queue, "rt_prio_array", "queue");
 	}
 
+	if (!VALID_STRUCT(task_group) && STRUCT_EXISTS("task_group"))
+		task_group_offset_init();
+
 	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
 		error(FATAL, "per-cpu runqueues do not exist\n");
 
@@ -7641,12 +7721,8 @@ dump_CFS_runqueues(void)
 		fprintf(fp, "  CFS RB_ROOT: %lx\n", (ulong)root);
 
 		hq_open();
-		tot = dump_tasks_in_cfs_rq(cfs_rq);
+		dump_tasks_in_cfs_rq(0, cfs_rq);
 		hq_close();
-		if (!tot) {
-			INDENT(5);
-			fprintf(fp, "[no tasks queued]\n");
-		}
 	}
 
 	FREEBUF(runqbuf);
@@ -7665,6 +7741,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 	ulong *tlist;
 	ulong my_q, task_addr;
 	char *rt_rq_buf;
+	ulong tmp;
 
 	if (!depth)
 		fprintf(fp, "  RT PRIO_ARRAY: %lx\n",  k_prio_array);
@@ -7714,8 +7791,17 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 
 					INDENT(5 + 6 * depth);
 					fprintf(fp, "[%3d] ", i);
-					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx\n",
+					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
 						my_q + OFFSET(rt_rq_active));
+					if (VALID_MEMBER(rt_rq_tg) &&
+					    VALID_MEMBER(task_group_css)) {
+						readmem(my_q + OFFSET(rt_rq_tg),
+							KVADDR, &tmp, sizeof(ulong),
+							"rt_rq tg",
+							FAULT_ON_ERROR);
+						dump_task_group_name(tmp);
+					}
+					fprintf(fp, "\n");
 					tot++;
 					dump_RT_prio_array(depth + 1,
 						my_q + OFFSET(rt_rq_active),
-- 
1.7.1

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/crash-utility

[Index of Archives]     [Fedora Development]     [Fedora Desktop]     [Fedora SELinux]     [Yosemite News]     [KDE Users]     [Fedora Tools]

 

Powered by Linux