[PATCH] runq: make tasks in throttled cfs_rqs/rt_rqs displayed

Hello Dave,

Currently, the runq command doesn't show tasks sitting on throttled
cfs_rqs/rt_rqs, but it is sometimes useful to see them when tracking down a
problem, so I made these patches to display them.

Two patches are attached; PATCH2 should be applied on top of PATCH1.

PATCH1: 0001-runq-make-tasks-in-cfs_rq-displayed-hierarchically.patch
This patch changes how tasks in a cfs_rq are displayed so that task groups
are shown hierarchically; a short sketch of the recursion idea follows the
example output. For example, before applying PATCH1, the display is:

CPU 2 RUNQUEUE: ffff88002c316680
  CURRENT: PID: 4274   TASK: ffff88013aa2eae0  COMMAND: "sh"
  RT PRIO_ARRAY: ffff88002c316808
     [no tasks queued]
  CFS RB_ROOT: ffff88002c316718
     [120] PID: 4296   TASK: ffff88010916d540  COMMAND: "sh"
     [120] PID: 4258   TASK: ffff880135c5eaa0  COMMAND: "sh"
     [120] PID: 4270   TASK: ffff88013aeb6aa0  COMMAND: "sh"
     [120] PID: 4268   TASK: ffff88013b6b4040  COMMAND: "sh"
     [120] PID: 4272   TASK: ffff88013a918080  COMMAND: "sh"
     [120] PID: 4280   TASK: ffff880109028080  COMMAND: "sh"
     [120] PID: 4276   TASK: ffff8801388fa080  COMMAND: "sh"
     [120] PID: 4278   TASK: ffff880109029540  COMMAND: "sh"
     [120] PID: 4260   TASK: ffff88013aeeeae0  COMMAND: "sh"
     [130] PID: 4335   TASK: ffff88011ac3d500  COMMAND: "sosreport"
     [120] PID: 4284   TASK: ffff8801050a7540  COMMAND: "sh"
     [120] PID: 4286   TASK: ffff8801050a6080  COMMAND: "sh"
     [120] PID: 4282   TASK: ffff88011ac48aa0  COMMAND: "sh"
     [120] PID: 4290   TASK: ffff8801041f7540  COMMAND: "sh"
     [120] PID: 4288   TASK: ffff880121530aa0  COMMAND: "sh"
     [120] PID: 4292   TASK: ffff8801041f6080  COMMAND: "sh"
     [120] PID: 4262   TASK: ffff880137b21500  COMMAND: "sh"
     [120] PID: 4266   TASK: ffff88011aeb4aa0  COMMAND: "sh"
     [120] PID: 4264   TASK: ffff88010400b540  COMMAND: "sh"

After applying PATCH1, the display changes to:

CPU 2 RUNQUEUE: ffff88002c316680
  CURRENT: PID: 4274   TASK: ffff88013aa2eae0  COMMAND: "sh"
  RT PRIO_ARRAY: ffff88002c316808
     [no tasks queued]
  CFS RB_ROOT: ffff88002c316718
     [120] PID: 4296   TASK: ffff88010916d540  COMMAND: "sh"
     [120] PID: 4258   TASK: ffff880135c5eaa0  COMMAND: "sh"
     GROUP CFS RB_ROOT: ffff880138f50200 <test1> 
           GROUP CFS RB_ROOT: ffff88001f8d1e00 <test11> 
                 [120] PID: 4270   TASK: ffff88013aeb6aa0  COMMAND: "sh"
                 [120] PID: 4268   TASK: ffff88013b6b4040  COMMAND: "sh"
                 [120] PID: 4272   TASK: ffff88013a918080  COMMAND: "sh"
           GROUP CFS RB_ROOT: ffff88013a1ba200 <test12> 
                 [120] PID: 4280   TASK: ffff880109028080  COMMAND: "sh"
                 [120] PID: 4276   TASK: ffff8801388fa080  COMMAND: "sh"
                 [120] PID: 4278   TASK: ffff880109029540  COMMAND: "sh"
     [120] PID: 4260   TASK: ffff88013aeeeae0  COMMAND: "sh"
     [130] PID: 4335   TASK: ffff88011ac3d500  COMMAND: "sosreport"
     GROUP CFS RB_ROOT: ffff88013bb75400 <test2> 
           GROUP CFS RB_ROOT: ffff8801381f1000 <test21> 
                 [120] PID: 4284   TASK: ffff8801050a7540  COMMAND: "sh"
                 [120] PID: 4286   TASK: ffff8801050a6080  COMMAND: "sh"
                 [120] PID: 4282   TASK: ffff88011ac48aa0  COMMAND: "sh"
           GROUP CFS RB_ROOT: ffff88001f816000 <test22> 
                 [120] PID: 4290   TASK: ffff8801041f7540  COMMAND: "sh"
                 [120] PID: 4288   TASK: ffff880121530aa0  COMMAND: "sh"
                 [120] PID: 4292   TASK: ffff8801041f6080  COMMAND: "sh"
           [120] PID: 4262   TASK: ffff880137b21500  COMMAND: "sh"
           [120] PID: 4266   TASK: ffff88011aeb4aa0  COMMAND: "sh"
           [120] PID: 4264   TASK: ffff88010400b540  COMMAND: "sh"
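
The nested GROUP CFS RB_ROOT levels above come from recursing into group
scheduling entities: a sched_entity whose my_q member is non-NULL represents a
task group, and my_q points to that group's own cfs_rq. Below is a minimal
sketch of the idea, condensed from PATCH1 (walk_cfs_rq_sketch() and the
commented-out print call are illustrative only; the
VALID_MEMBER(sched_entity_my_q) guard, error handling and the hq_enter()
duplicate check are omitted):

static int
walk_cfs_rq_sketch(int depth, ulong cfs_rq)
{
        struct rb_root *root;
        struct rb_node *node;
        ulong my_q;
        int total = 0;

        if (depth) {
                /* a nested group: print its header indented by its depth */
                INDENT(-1 + 6 * depth);
                fprintf(fp, "GROUP CFS RB_ROOT: %lx\n", cfs_rq);
        }

        root = (struct rb_root *)(cfs_rq + OFFSET(cfs_rq_tasks_timeline));
        for (node = rb_first(root); node; node = rb_next(node)) {
                readmem((ulong)node - OFFSET(sched_entity_run_node) +
                        OFFSET(sched_entity_my_q), KVADDR, &my_q,
                        sizeof(ulong), "my_q", FAULT_ON_ERROR);
                if (my_q) {
                        /* group entity: my_q is the group's own cfs_rq */
                        total += walk_cfs_rq_sketch(depth + 1, my_q);
                        continue;
                }
                /* task entity: print one "[prio] PID: ..." line */
                INDENT(6 * depth);
                /* dump_task_runq_entry(...); */
                total++;
        }
        return total;
}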


PATCH2: 0002-runq-make-tasks-in-throttled-cfs_rqs-rt_rqs-displaye.patch
This patch displays tasks in throttled cfs_rqs and rt_rqs. A sketch of how
the throttled runqueues are collected follows the two examples below.

For example, before applying PATCH2:

CPU 1 RUNQUEUE: ffff88002c296680
  CURRENT: PID: 3494   TASK: ffff88011bcaa080  COMMAND: "sh"
  RT PRIO_ARRAY: ffff88002c296808
     [no tasks queued]
  CFS RB_ROOT: ffff88002c296718
     [120] PID: 3516   TASK: ffff88011bfdd500  COMMAND: "sh"
     [120] PID: 3514   TASK: ffff88011bef2ae0  COMMAND: "sh"

After applying PATCH2:

CPU 1 RUNQUEUE: ffff88002c296680
  CURRENT: PID: 3494   TASK: ffff88011bcaa080  COMMAND: "sh"
  RT PRIO_ARRAY: ffff88002c296808
     [  0] GROUP RT PRIO_ARRAY: ffff88013b351800 <test1> (THROTTLED)
           [  0] GROUP RT PRIO_ARRAY: ffff88013acdb800 <test11> 
                 [  0] PID: 3546   TASK: ffff88010d05e040  COMMAND: "rtloop99"
                 [  1] PID: 3541   TASK: ffff88011bef2080  COMMAND: "rtloop98"
                 [ 54] PID: 3536   TASK: ffff88011bf28080  COMMAND: "rtloop45"
                 [ 98] PID: 3530   TASK: ffff88013ab1aaa0  COMMAND: "rtloop1"
           [  1] PID: 3540   TASK: ffff88011bf29540  COMMAND: "rtloop98"
           [ 54] PID: 3534   TASK: ffff88013ab14040  COMMAND: "rtloop45"
           [ 54] GROUP RT PRIO_ARRAY: ffff88013a207800 <test12> 
                 [ 54] PID: 3537   TASK: ffff88013a378ae0  COMMAND: "rtloop45"
                 [ 98] PID: 3531   TASK: ffff88011bdae080  COMMAND: "rtloop1"
  CFS RB_ROOT: ffff88002c296718
     [120] PID: 3516   TASK: ffff88011bfdd500  COMMAND: "sh"
     [120] PID: 3514   TASK: ffff88011bef2ae0  COMMAND: "sh"


Another example, showing throttled tasks in cfs_rqs:

CPU 1 RUNQUEUE: ffff880028296680
  CURRENT: PID: 7814   TASK: ffff880117bab500  COMMAND: "sh"
  RT PRIO_ARRAY: ffff880028296808
     [no tasks queued]
  CFS RB_ROOT: ffff880028296718
     [120] PID: 2619   TASK: ffff88013812c040  COMMAND: "Xorg"
......
     [120] PID: 7798   TASK: ffff88013a39eae0  COMMAND: "sh"
     GROUP CFS RB_ROOT: ffff880135fc6800 <test1> (DEQUEUED)
           GROUP CFS RB_ROOT: ffff88000ce5bc00 <test12> (THROTTLED)
                 [120] PID: 7820   TASK: ffff8801383ab500  COMMAND: "sh"
                 [120] PID: 7818   TASK: ffff88013ba14ae0  COMMAND: "sh"
                 [120] PID: 7816   TASK: ffff880117baa040  COMMAND: "sh"
     GROUP CFS RB_ROOT: ffff880135fc6800 <test1> (DEQUEUED)
           GROUP CFS RB_ROOT: ffff8800b0b32000 <test11> (THROTTLED)
                 [120] PID: 7810   TASK: ffff8800b0b96aa0  COMMAND: "sh"
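
Before dumping each CPU, PATCH2 collects the throttled runqueues by walking
the whole task_group tree from the root task_group and recording every per-cpu
cfs_rq whose throttled flag is set, together with its depth in the hierarchy.
A rough sketch of that step, condensed from fill_throttled_cfs_rq_array()
(collect_throttled_cfs_rqs() and for_each_child_task_group() are hypothetical
shorthand for the children/siblings list walk the real code does):

static void
collect_throttled_cfs_rqs(int depth, ulong group, int cpu)
{
        ulong cfs_rq_array, cfs_rq, child;
        int throttled;

        /* task_group.cfs_rq points to a per-cpu array of cfs_rq pointers */
        readmem(group + OFFSET(task_group_cfs_rq), KVADDR, &cfs_rq_array,
                sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
        readmem(cfs_rq_array + sizeof(ulong) * cpu, KVADDR, &cfs_rq,
                sizeof(ulong), "task_group cfs_rq[cpu]", FAULT_ON_ERROR);
        readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR, &throttled,
                sizeof(int), "cfs_rq throttled", FAULT_ON_ERROR);

        if (throttled && cfs_last < MAX_THROTTLED_RQ) {
                /* remember the rq and its depth for the later display pass */
                throttled_cfs_rq_array[cfs_last].rq = cfs_rq;
                throttled_cfs_rq_array[cfs_last++].depth = depth;
        }

        for_each_child_task_group(group, child)   /* hypothetical iterator */
                collect_throttled_cfs_rqs(depth + 1, child, cpu);
}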

*NOTE*
1. The patches also display the name of the group that each cfs_rq/rt_rq
   belongs to.
2. A group rq can be tagged with one of two statuses: THROTTLED and DEQUEUED.
   THROTTLED means the cfs_rq/rt_rq itself is throttled. DEQUEUED means the rq
   has no runnable entities of its own: no task is queued on it directly and
   its child rqs are throttled. A sketch of how the tag is chosen follows.
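
A minimal sketch of that choice (group_rq_status() is a hypothetical helper;
the real patches print the tags inline while walking the hierarchy):

static const char *
group_rq_status(ulong cfs_rq, int child_throttled)
{
        int throttled;

        /* cfs_rq->throttled is set while the group is over its CFS quota */
        readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR, &throttled,
                sizeof(int), "cfs_rq throttled", FAULT_ON_ERROR);

        if (throttled)
                return "(THROTTLED)";
        /* empty here only because a descendant rq was throttled */
        return child_throttled ? "(DEQUEUED)" : "";
}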

Thanks
Zhang Yanfei
From ab3eb627ae53074e6e9a94503e5b3022f0532e18 Mon Sep 17 00:00:00 2001
From: zhangyanfei <zhangyanfei@xxxxxxxxxxxxxx>
Date: Mon, 22 Oct 2012 10:35:36 +0800
Subject: [PATCH 1/2] runq: make tasks in cfs_rq displayed hierarchically

Signed-off-by: zhangyanfei <zhangyanfei@xxxxxxxxxxxxxx>
---
 defs.h    |   10 +++++++
 kernel.c  |    2 +
 symbols.c |   20 ++++++++++++++
 task.c    |   84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 4 files changed, 108 insertions(+), 8 deletions(-)

diff --git a/defs.h b/defs.h
index 319584f..4ee550c 100755
--- a/defs.h
+++ b/defs.h
@@ -1792,6 +1792,15 @@ struct offset_table {                    /* stash of commonly-used offsets */
 	long sched_rt_entity_my_q;
 	long neigh_table_hash_shift;
 	long neigh_table_nht_ptr;
+	long task_group_css;
+	long task_group_cfs_bandwidth;
+	long task_group_rt_bandwidth;
+	long task_group_rt_rq;
+	long task_group_cfs_rq;
+	long cfs_rq_tg;
+	long rt_rq_tg;
+	long cgroup_subsys_state_cgroup;
+	long cgroup_dentry;
 };
 
 struct size_table {         /* stash of commonly-used sizes */
@@ -1927,6 +1936,7 @@ struct size_table {         /* stash of commonly-used sizes */
 	long log;
 	long log_level;
 	long rt_rq;
+	long task_group;
 };
 
 struct array_table {
diff --git a/kernel.c b/kernel.c
index 45da48e..d5fee08 100755
--- a/kernel.c
+++ b/kernel.c
@@ -308,6 +308,8 @@ kernel_init()
 	STRUCT_SIZE_INIT(prio_array, "prio_array"); 
 
 	MEMBER_OFFSET_INIT(rq_cfs, "rq", "cfs");
+	if (STRUCT_EXISTS("task_group"))
+		STRUCT_SIZE_INIT(task_group, "task_group");
 
        /*
         *  In 2.4, smp_send_stop() sets smp_num_cpus back to 1
diff --git a/symbols.c b/symbols.c
index 1f09c9f..fc2b5c7 100755
--- a/symbols.c
+++ b/symbols.c
@@ -8820,6 +8820,24 @@ dump_offset_table(char *spec, ulong makestruct)
 		OFFSET(log_flags_level));
 	fprintf(fp, "          sched_rt_entity_my_q: %ld\n",
 		OFFSET(sched_rt_entity_my_q));
+	fprintf(fp, "                task_group_css: %ld\n",
+		OFFSET(task_group_css));
+	fprintf(fp, "      task_group_cfs_bandwidth: %ld\n",
+		OFFSET(task_group_cfs_bandwidth));
+	fprintf(fp, "       task_group_rt_bandwidth: %ld\n",
+		OFFSET(task_group_rt_bandwidth));
+	fprintf(fp, "              task_group_rt_rq: %ld\n",
+		OFFSET(task_group_rt_rq));
+	fprintf(fp, "             task_group_cfs_rq: %ld\n",
+		OFFSET(task_group_cfs_rq));
+	fprintf(fp, "                     cfs_rq_tg: %ld\n",
+		OFFSET(cfs_rq_tg));
+	fprintf(fp, "                      rt_rq_tg: %ld\n",
+		OFFSET(rt_rq_tg));
+	fprintf(fp, "    cgroup_subsys_state_cgroup: %ld\n",
+		OFFSET(cgroup_subsys_state_cgroup));
+	fprintf(fp, "                 cgroup_dentry: %ld\n",
+		OFFSET(cgroup_dentry));
 
 	fprintf(fp, "\n                    size_table:\n");
 	fprintf(fp, "                          page: %ld\n", SIZE(page));
@@ -9037,6 +9055,8 @@ dump_offset_table(char *spec, ulong makestruct)
 		SIZE(log_level));
 	fprintf(fp, "                         rt_rq: %ld\n",
 		SIZE(rt_rq));
+	fprintf(fp, "                    task_group: %ld\n",
+		SIZE(task_group));
 
         fprintf(fp, "\n                   array_table:\n");
 	/*
diff --git a/task.c b/task.c
index f8c6325..e0aff17 100755
--- a/task.c
+++ b/task.c
@@ -64,7 +64,7 @@ static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
 static void dump_task_runq_entry(struct task_context *);
-static int dump_tasks_in_cfs_rq(ulong);
+static int dump_tasks_in_cfs_rq(int, ulong);
 static void dump_on_rq_tasks(void);
 static void dump_CFS_runqueues(void);
 static void dump_RT_prio_array(int, ulong, char *);
@@ -7422,6 +7422,31 @@ rb_next(struct rb_node *node)
 }
 
 static void
+dump_task_group_name(ulong group)
+{
+	ulong cgroup, dentry, name;
+	char *dentry_buf;
+	int len;
+	char buf[BUFSIZ];
+	char tmp_buf[100];
+
+	readmem(group + OFFSET(task_group_css) + OFFSET(cgroup_subsys_state_cgroup),
+		KVADDR, &cgroup, sizeof(ulong),
+		"task_group css cgroup", FAULT_ON_ERROR);
+	readmem(cgroup + OFFSET(cgroup_dentry), KVADDR, &dentry, sizeof(ulong),
+		"cgroup dentry", FAULT_ON_ERROR);
+
+	dentry_buf = GETBUF(SIZE(dentry));
+	readmem(dentry, KVADDR, dentry_buf, SIZE(dentry),
+		"dentry", FAULT_ON_ERROR);
+	len = UINT(dentry_buf + OFFSET(dentry_d_name) + OFFSET(qstr_len));
+	name = ULONG(dentry_buf + OFFSET(dentry_d_name) + OFFSET(qstr_name));
+	BZERO(tmp_buf, 100);
+	readmem(name, KVADDR, tmp_buf, len, "qstr name", FAULT_ON_ERROR);
+	fprintf(fp, " <%s> ", tmp_buf);
+}
+
+static void
 dump_task_runq_entry(struct task_context *tc)
 {
 	int prio;
@@ -7434,16 +7459,27 @@ dump_task_runq_entry(struct task_context *tc)
 }
 
 static int
-dump_tasks_in_cfs_rq(ulong cfs_rq)
+dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 {
 	struct task_context *tc;
 	struct rb_root *root;
 	struct rb_node *node;
 	ulong my_q, leftmost, curr, curr_my_q;
 	int total;
+	ulong tmp;
 
 	total = 0;
 
+	if (depth && VALID_MEMBER(cfs_rq_tg)) {
+		readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+			&tmp, sizeof(ulong), "cfs_rq tg",
+			FAULT_ON_ERROR);
+		INDENT(-1 + 6 * depth);
+		fprintf(fp, "GROUP CFS RB_ROOT: %lx", cfs_rq);
+		dump_task_group_name(tmp);
+		fprintf(fp, "\n");
+	}
+
 	if (VALID_MEMBER(sched_entity_my_q)) {
 		readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr, 
 			sizeof(ulong), "curr", FAULT_ON_ERROR);
@@ -7452,7 +7488,8 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 				&curr_my_q, sizeof(ulong), "curr->my_q", 
 				FAULT_ON_ERROR);
 			if (curr_my_q)
-				total += dump_tasks_in_cfs_rq(curr_my_q);
+				total += dump_tasks_in_cfs_rq(depth + 1,
+						curr_my_q);
 		}
 	}
 
@@ -7466,7 +7503,7 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 				+ OFFSET(sched_entity_my_q), KVADDR, &my_q,
 				sizeof(ulong), "my_q", FAULT_ON_ERROR);
 			if (my_q) {
-				total += dump_tasks_in_cfs_rq(my_q);
+				total += dump_tasks_in_cfs_rq(depth + 1, my_q);
 				continue;
 			}
 		}
@@ -7475,9 +7512,10 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 				     OFFSET(sched_entity_run_node));
 		if (!tc)
 			continue;
-		if (hq_enter((ulong)tc))
+		if (hq_enter((ulong)tc)) {
+			INDENT(6 * depth);
 			dump_task_runq_entry(tc);
-		else {
+		} else {
 			error(WARNING, "duplicate CFS runqueue node: task %lx\n",
 				tc->task);
 			return total;
@@ -7489,6 +7527,27 @@ dump_tasks_in_cfs_rq(ulong cfs_rq)
 }
 
 static void
+task_group_offset_init(void)
+{
+	if (MEMBER_EXISTS("task_group", "rt_bandwidth")) {
+		MEMBER_OFFSET_INIT(task_group_rt_bandwidth,
+			"task_group", "rt_bandwidth");
+		MEMBER_OFFSET_INIT(task_group_rt_rq, "task_group", "rt_rq");
+		MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
+		MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
+		MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup, "cgroup_subsys_state", "cgroup");
+		MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
+	}
+
+	if (MEMBER_EXISTS("task_group", "cfs_bandwidth")) {
+		MEMBER_OFFSET_INIT(task_group_cfs_bandwidth,
+			"task_group", "cfs_bandwidth");
+		MEMBER_OFFSET_INIT(task_group_cfs_rq, "task_group", "cfs_rq");
+		MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
+	}
+}
+
+static void
 dump_on_rq_tasks(void)
 {
 	char buf[BUFSIZE];
@@ -7586,6 +7645,9 @@ dump_CFS_runqueues(void)
 		MEMBER_OFFSET_INIT(rt_prio_array_queue, "rt_prio_array", "queue");
 	}
 
+	if (VALID_STRUCT(task_group) && !VALID_MEMBER(task_group_cfs_rq))
+		task_group_offset_init();
+
 	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
 		error(FATAL, "per-cpu runqueues do not exist\n");
 
@@ -7641,7 +7703,7 @@ dump_CFS_runqueues(void)
 		fprintf(fp, "  CFS RB_ROOT: %lx\n", (ulong)root);
 
 		hq_open();
-		tot = dump_tasks_in_cfs_rq(cfs_rq);
+		tot = dump_tasks_in_cfs_rq(0, cfs_rq);
 		hq_close();
 		if (!tot) {
 			INDENT(5);
@@ -7665,6 +7727,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 	ulong *tlist;
 	ulong my_q, task_addr;
 	char *rt_rq_buf;
+	ulong tmp;
 
 	if (!depth)
 		fprintf(fp, "  RT PRIO_ARRAY: %lx\n",  k_prio_array);
@@ -7714,8 +7777,13 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 
 					INDENT(5 + 6 * depth);
 					fprintf(fp, "[%3d] ", i);
-					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx\n",
+					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
 						my_q + OFFSET(rt_rq_active));
+					readmem(my_q + OFFSET(rt_rq_tg), KVADDR,
+						&tmp, sizeof(ulong), "rt_rq tg",
+						FAULT_ON_ERROR);
+					dump_task_group_name(tmp);
+					fprintf(fp, "\n");
 					tot++;
 					dump_RT_prio_array(depth + 1,
 						my_q + OFFSET(rt_rq_active),
-- 
1.7.1

From 0bb543470c02e5f741bab2bbbe87443791ff5f24 Mon Sep 17 00:00:00 2001
From: zhangyanfei <zhangyanfei@xxxxxxxxxxxxxx>
Date: Tue, 23 Oct 2012 14:49:53 +0800
Subject: [PATCH 2/2] runq: make tasks in throttled cfs_rqs/rt_rqs displayed

Signed-off-by: zhangyanfei <zhangyanfei@xxxxxxxxxxxxxx>
---
 defs.h    |    6 +
 symbols.c |   12 ++
 task.c    |  350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 349 insertions(+), 19 deletions(-)

diff --git a/defs.h b/defs.h
index 4ee550c..17fc82f 100755
--- a/defs.h
+++ b/defs.h
@@ -1801,6 +1801,12 @@ struct offset_table {                    /* stash of commonly-used offsets */
 	long rt_rq_tg;
 	long cgroup_subsys_state_cgroup;
 	long cgroup_dentry;
+	long task_group_parent;
+	long task_group_children;
+	long task_group_siblings;
+	long cfs_rq_throttled;
+	long rt_rq_rt_throttled;
+	long rt_rq_highest_prio_curr;
 };
 
 struct size_table {         /* stash of commonly-used sizes */
diff --git a/symbols.c b/symbols.c
index fc2b5c7..716bbf6 100755
--- a/symbols.c
+++ b/symbols.c
@@ -8838,6 +8838,18 @@ dump_offset_table(char *spec, ulong makestruct)
 		OFFSET(cgroup_subsys_state_cgroup));
 	fprintf(fp, "                 cgroup_dentry: %ld\n",
 		OFFSET(cgroup_dentry));
+	fprintf(fp, "             task_group_parent: %ld\n",
+		OFFSET(task_group_parent));
+	fprintf(fp, "           task_group_children: %ld\n",
+		OFFSET(task_group_children));
+	fprintf(fp, "           task_group_siblings: %ld\n",
+		OFFSET(task_group_siblings));
+	fprintf(fp, "              cfs_rq_throttled: %ld\n",
+		OFFSET(cfs_rq_throttled));
+	fprintf(fp, "            rt_rq_rt_throttled: %ld\n",
+		OFFSET(rt_rq_rt_throttled));
+	fprintf(fp, "       rt_rq_highest_prio_curr: %ld\n",
+		OFFSET(rt_rq_highest_prio_curr));
 
 	fprintf(fp, "\n                    size_table:\n");
 	fprintf(fp, "                          page: %ld\n", SIZE(page));
diff --git a/task.c b/task.c
index e0aff17..24c6d95 100755
--- a/task.c
+++ b/task.c
@@ -63,11 +63,17 @@ static struct rb_node *rb_next(struct rb_node *);
 static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
+static void sort_throttled_rq_array(void *, int);
+static void dump_task_group_name(ulong);
 static void dump_task_runq_entry(struct task_context *);
-static int dump_tasks_in_cfs_rq(int, ulong);
+static int dump_tasks_in_cfs_rq(int, ulong, int, int);
+static void fill_throttled_cfs_rq_array(int, ulong, char *, int);
+static void fill_throttled_rt_rq_array(int, ulong, char *, int);
+static ulong get_rt_rq_curr_offset(void);
+static void task_group_offset_init(void);
 static void dump_on_rq_tasks(void);
 static void dump_CFS_runqueues(void);
-static void dump_RT_prio_array(int, ulong, char *);
+static void dump_RT_prio_array(int, ulong, char *, int, int);
 static void task_struct_member(struct task_context *,unsigned int, struct reference *);
 static void signal_reference(struct task_context *, ulong, struct reference *);
 static void do_sig_thread_group(ulong);
@@ -7421,6 +7427,44 @@ rb_next(struct rb_node *node)
         return parent;
 }
 
+#define MAX_THROTTLED_RQ 100
+struct throttled_rq {
+	ulong rq;
+	int depth;
+	int prio;
+};
+static struct throttled_rq throttled_rt_rq_array[MAX_THROTTLED_RQ];
+static struct throttled_rq throttled_cfs_rq_array[MAX_THROTTLED_RQ];
+static int rt_last = 0;
+static int cfs_last = 0;
+
+#define COPY_THROTTLED(t1, t2)                             \
+do {                                                       \
+	t1.rq = t2.rq;                                     \
+	t1.depth = t2.depth;                               \
+	t1.prio = t2.prio;                                 \
+} while (0);
+
+static void
+sort_throttled_rq_array(void *a, int len)
+{
+	int i, j;
+	struct throttled_rq tmp;
+	struct throttled_rq *array = (struct throttled_rq *)a;
+
+	for (i = 0; i < len - 1; i++) {
+		for (j = 0; j < len - i - 1; j++) {
+			if (array[j].depth > array[j+1].depth ||
+			    (array[j].depth == array[j+1].depth &&
+			     array[j].prio > array[j+1].prio)) {
+				COPY_THROTTLED(tmp, array[j+1]);
+				COPY_THROTTLED(array[j+1], array[j]);
+				COPY_THROTTLED(array[j], tmp);
+			}
+		}
+	}
+}
+
 static void
 dump_task_group_name(ulong group)
 {
@@ -7459,14 +7503,14 @@ dump_task_runq_entry(struct task_context *tc)
 }
 
 static int
-dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
+dump_tasks_in_cfs_rq(int depth, ulong cfs_rq, int cpu, int throttled)
 {
 	struct task_context *tc;
 	struct rb_root *root;
 	struct rb_node *node;
 	ulong my_q, leftmost, curr, curr_my_q;
-	int total;
-	ulong tmp;
+	int total, c, i, delta;
+	ulong p1, p2, t1, t2, th_cfs_rq, tmp, *tg_array;
 
 	total = 0;
 
@@ -7477,6 +7521,8 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 		INDENT(-1 + 6 * depth);
 		fprintf(fp, "GROUP CFS RB_ROOT: %lx", cfs_rq);
 		dump_task_group_name(tmp);
+		if (throttled)
+			fprintf(fp, "(THROTTLED)");
 		fprintf(fp, "\n");
 	}
 
@@ -7489,7 +7535,7 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 				FAULT_ON_ERROR);
 			if (curr_my_q)
 				total += dump_tasks_in_cfs_rq(depth + 1,
-						curr_my_q);
+						curr_my_q, cpu, throttled);
 		}
 	}
 
@@ -7503,7 +7549,8 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 				+ OFFSET(sched_entity_my_q), KVADDR, &my_q,
 				sizeof(ulong), "my_q", FAULT_ON_ERROR);
 			if (my_q) {
-				total += dump_tasks_in_cfs_rq(depth + 1, my_q);
+				total += dump_tasks_in_cfs_rq(depth + 1,
+						my_q, cpu, throttled);
 				continue;
 			}
 		}
@@ -7523,26 +7570,181 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 		total++;
 	}
 
+	for (c = 0; c < cfs_last; c++) {
+		delta = throttled_cfs_rq_array[c].depth - depth;
+		if (delta >= 1) {
+			readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+				&t1, sizeof(ulong), "cfs_rq tg",
+				FAULT_ON_ERROR);
+			th_cfs_rq = throttled_cfs_rq_array[c].rq;
+			readmem(th_cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+				&t2, sizeof(ulong), "cfs_rq tg",
+				FAULT_ON_ERROR);
+			tg_array = (ulong *)GETBUF(delta * sizeof(ulong));
+			for (i = 0; i < delta; i++) {
+				readmem(t2 + OFFSET(task_group_parent), KVADDR,
+					&p2, sizeof(ulong), "task_group parent",
+					FAULT_ON_ERROR);
+				tg_array[i] = t2 = p2;
+			}
+			if (t1 == p2) {
+				for (i = delta - 1; i > 0; i--) {
+					INDENT(-1 + 6 * (depth + delta - i));
+					readmem(tg_array[i - 1] + OFFSET(task_group_cfs_rq),
+						KVADDR, &tmp, sizeof(ulong),
+						"task_group cfs_rq", FAULT_ON_ERROR);
+					readmem(tmp + sizeof(ulong) * cpu, KVADDR,
+						&th_cfs_rq, sizeof(ulong),
+						"task_group cfs_rq", FAULT_ON_ERROR);
+					fprintf(fp, "GROUP CFS RB_ROOT: %lx",
+						th_cfs_rq);
+					dump_task_group_name(tg_array[i-1]);
+					fprintf(fp, "(DEQUEUED)\n");
+				}
+				throttled_cfs_rq_array[c].depth = -1;
+				total += dump_tasks_in_cfs_rq(depth + delta,
+					throttled_cfs_rq_array[c].rq, cpu, 1);
+			}
+			FREEBUF(tg_array);
+		}
+	}
+
+	if (!total) {
+		INDENT(5 + 6 * depth);
+		fprintf(fp, "[no tasks queued]\n");
+	}
 	return total;
 }
 
 static void
+fill_throttled_cfs_rq_array(int depth, ulong group, char *group_buf, int cpu)
+{
+	ulong cfs_rq, tmp;
+	int throttled;
+	ulong kvaddr, uvaddr, offset;
+	ulong list_head[2], next;
+
+	tmp = ULONG(group_buf + OFFSET(task_group_cfs_rq));
+	readmem(tmp + sizeof(ulong) * cpu, KVADDR, &cfs_rq,
+		sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
+	readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR, &throttled,
+		sizeof(int), "cfs_rq throttled", FAULT_ON_ERROR);
+
+	if (throttled) {
+		throttled_cfs_rq_array[cfs_last].rq = cfs_rq;
+		throttled_cfs_rq_array[cfs_last++].depth = depth;
+	}
+
+	offset = OFFSET(task_group_children);
+	kvaddr = group + offset;
+	uvaddr = (ulong)(group_buf + offset);
+	BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);
+
+	if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
+		return;
+
+	next = list_head[0];
+	while (next != kvaddr) {
+		group = next - OFFSET(task_group_siblings);
+		readmem(group, KVADDR, group_buf, SIZE(task_group),
+			"task_group", FAULT_ON_ERROR);
+		next = ULONG(group_buf + OFFSET(task_group_siblings) +
+			OFFSET(list_head_next));
+		fill_throttled_cfs_rq_array(depth + 1, group, group_buf, cpu);
+	}
+}
+
+static void
+fill_throttled_rt_rq_array(int depth, ulong group, char *group_buf, int cpu)
+{
+	ulong rt_rq, tmp;
+	int throttled;
+	ulong kvaddr, uvaddr, offset;
+	ulong list_head[2], next;
+	char *rt_rq_buf;
+
+	tmp = ULONG(group_buf + OFFSET(task_group_rt_rq));
+	readmem(tmp + sizeof(ulong) * cpu, KVADDR, &rt_rq,
+		sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
+	rt_rq_buf = GETBUF(SIZE(rt_rq));
+	readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq), "rt_rq", FAULT_ON_ERROR);
+	throttled = UINT(rt_rq_buf + OFFSET(rt_rq_rt_throttled));
+
+	if (throttled) {
+		throttled_rt_rq_array[rt_last].rq = rt_rq;
+		throttled_rt_rq_array[rt_last].prio =
+			INT(rt_rq_buf + OFFSET(rt_rq_highest_prio_curr));
+		throttled_rt_rq_array[rt_last++].depth = depth;
+	}
+	FREEBUF(rt_rq_buf);
+
+	offset = OFFSET(task_group_children);
+	kvaddr = group + offset;
+	uvaddr = (ulong)(group_buf + offset);
+	BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);
+
+	if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
+		return;
+
+	next = list_head[0];
+	while (next != kvaddr) {
+		group = next - OFFSET(task_group_siblings);
+		readmem(group, KVADDR, group_buf, SIZE(task_group),
+			"task_group", FAULT_ON_ERROR);
+		next = ULONG(group_buf + OFFSET(task_group_siblings) +
+			OFFSET(list_head_next));
+		fill_throttled_rt_rq_array(depth + 1, group, group_buf, cpu);
+	}
+}
+
+static ulong
+get_rt_rq_curr_offset(void)
+{
+	int success;
+	char buf[BUFSIZE];
+	char *tokens[100];
+	ulong offset;
+
+	offset = (ulong)-1;
+	sprintf(buf, "print &((struct rt_rq *)0x0)->highest_prio.curr");
+	open_tmpfile();
+		success = gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR);
+	rewind(pc->tmpfile);
+	if (success && fgets(buf, BUFSIZE, pc->tmpfile)) {
+		parse_line(buf, tokens);
+		offset = htol(tokens[3], FAULT_ON_ERROR, NULL);
+	}
+
+	close_tmpfile();
+
+	if (!success)
+		error(FATAL, "gdb request failed: %s\n", buf);
+	return offset;
+}
+
+static void
 task_group_offset_init(void)
 {
 	if (MEMBER_EXISTS("task_group", "rt_bandwidth")) {
 		MEMBER_OFFSET_INIT(task_group_rt_bandwidth,
 			"task_group", "rt_bandwidth");
+		MEMBER_OFFSET_INIT(task_group_parent, "task_group", "parent");
+		MEMBER_OFFSET_INIT(task_group_children, "task_group", "children");
+		MEMBER_OFFSET_INIT(task_group_siblings, "task_group", "siblings");
 		MEMBER_OFFSET_INIT(task_group_rt_rq, "task_group", "rt_rq");
+		MEMBER_OFFSET_INIT(rt_rq_rt_throttled, "rt_rq", "rt_throttled");
 		MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
 		MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
 		MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup, "cgroup_subsys_state", "cgroup");
 		MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
+		ASSIGN_OFFSET(rt_rq_highest_prio_curr) = get_rt_rq_curr_offset();
 	}
 
 	if (MEMBER_EXISTS("task_group", "cfs_bandwidth")) {
 		MEMBER_OFFSET_INIT(task_group_cfs_bandwidth,
 			"task_group", "cfs_bandwidth");
 		MEMBER_OFFSET_INIT(task_group_cfs_rq, "task_group", "cfs_rq");
+		MEMBER_OFFSET_INIT(cfs_rq_throttled, "cfs_rq", "throttled");
 		MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
 	}
 }
@@ -7604,13 +7806,15 @@ dump_on_rq_tasks(void)
 static void
 dump_CFS_runqueues(void)
 {
-	int tot, cpu;
+	int cpu, i;
 	ulong runq, cfs_rq;
 	char *runqbuf, *cfs_rq_buf;
 	ulong tasks_timeline ATTRIBUTE_UNUSED;
 	struct task_context *tc;
 	struct rb_root *root;
 	struct syment *rq_sp, *init_sp;
+	ulong root_task_group;
+	char *group_buf, *group_buf_rt;
 
 	if (!VALID_STRUCT(cfs_rq)) {
 		STRUCT_SIZE_INIT(cfs_rq, "cfs_rq");
@@ -7657,6 +7861,15 @@ dump_CFS_runqueues(void)
 	else
 		cfs_rq_buf = NULL;
 
+	if (VALID_STRUCT(task_group)) {
+		if (symbol_exists("init_task_group"))
+			root_task_group = symbol_value("init_task_group");
+		else if (symbol_exists("root_task_group"))
+			root_task_group = symbol_value("root_task_group");
+		else
+			error(FATAL, "cannot determine root task_group\n");
+	}
+
 	get_active_set();
 
         for (cpu = 0; cpu < kt->cpus; cpu++) {
@@ -7697,18 +7910,53 @@ dump_CFS_runqueues(void)
 				OFFSET(cfs_rq_tasks_timeline));
 		}
 
+		if (VALID_MEMBER(task_group_rt_bandwidth)) {
+			group_buf_rt = GETBUF(SIZE(task_group));
+			readmem(root_task_group, KVADDR, group_buf_rt, SIZE(task_group),
+				"task_group", FAULT_ON_ERROR);
+			fill_throttled_rt_rq_array(0, root_task_group,
+				group_buf_rt, cpu);
+			sort_throttled_rq_array(throttled_rt_rq_array, rt_last);
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "throttled_rt_rq_array:\n");
+				for (i = 0; i < rt_last; i++) {
+					fprintf(fp, "  [%2d] = {depth=%d, prio=%d, rt_rq=%lx}\n",
+						i, throttled_rt_rq_array[i].depth,
+						throttled_rt_rq_array[i].prio,
+						throttled_rt_rq_array[i].rq);
+				}
+			}
+			FREEBUF(group_buf_rt);
+		}
+
 		dump_RT_prio_array(0, runq + OFFSET(rq_rt) + OFFSET(rt_rq_active),
-			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]);
+			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)], cpu, 0);
 
 		fprintf(fp, "  CFS RB_ROOT: %lx\n", (ulong)root);
 
+		if (VALID_MEMBER(task_group_cfs_bandwidth)) {
+			group_buf = GETBUF(SIZE(task_group));
+			readmem(root_task_group, KVADDR, group_buf, SIZE(task_group),
+				"task_group", FAULT_ON_ERROR);
+			fill_throttled_cfs_rq_array(0, root_task_group,
+				group_buf, cpu);
+			sort_throttled_rq_array(throttled_cfs_rq_array, cfs_last);
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "throttled_cfs_rq_array:\n");
+				for (i = 0; i < cfs_last; i++) {
+					fprintf(fp, "  [%2d] = {depth=%d, cfs_rq=%lx}\n",
+						i, throttled_cfs_rq_array[i].depth,
+						throttled_cfs_rq_array[i].rq);
+				}
+			}
+			FREEBUF(group_buf);
+		}
+
 		hq_open();
-		tot = dump_tasks_in_cfs_rq(0, cfs_rq);
+		dump_tasks_in_cfs_rq(0, cfs_rq, cpu, 0);
 		hq_close();
-		if (!tot) {
-			INDENT(5);
-			fprintf(fp, "[no tasks queued]\n");
-		}
+
+		rt_last = cfs_last = 0;
 	}
 
 	FREEBUF(runqbuf);
@@ -7717,9 +7965,10 @@ dump_CFS_runqueues(void)
 }
 
 static void
-dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
+dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array,
+		   int cpu, int throttled)
 {
-	int i, c, tot, cnt, qheads;
+	int i, c, j, tot, cnt, qheads, delta, prio;
 	ulong offset, kvaddr, uvaddr;
 	ulong list_head[2];
         struct list_data list_data, *ld;
@@ -7727,7 +7976,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 	ulong *tlist;
 	ulong my_q, task_addr;
 	char *rt_rq_buf;
-	ulong tmp;
+	ulong p1, p2, t1, t2, rt_rq, tmp, *tg_array;
 
 	if (!depth)
 		fprintf(fp, "  RT PRIO_ARRAY: %lx\n",  k_prio_array);
@@ -7748,7 +7997,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 				i, kvaddr, list_head[0], list_head[1]);
 
 		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
-			continue;
+				continue;
 
 		BZERO(ld, sizeof(struct list_data));
 		ld->start = list_head[0];
@@ -7787,7 +8036,8 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 					tot++;
 					dump_RT_prio_array(depth + 1,
 						my_q + OFFSET(rt_rq_active),
-						&rt_rq_buf[OFFSET(rt_rq_active)]);
+						&rt_rq_buf[OFFSET(rt_rq_active)],
+						cpu, throttled);
 					continue;
 				} else
 					task_addr -= OFFSET(task_struct_rt);
@@ -7804,6 +8054,68 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 		FREEBUF(tlist);
 	}
 
+	for (c = 0; c < rt_last; c++) {
+		delta = throttled_rt_rq_array[c].depth - depth;
+		if (delta >= 1) {
+			readmem(k_prio_array - OFFSET(rt_rq_active) +
+				OFFSET(rt_rq_tg), KVADDR,
+				&t1, sizeof(ulong), "rt_rq tg",
+				FAULT_ON_ERROR);
+			rt_rq = throttled_rt_rq_array[c].rq;
+			readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR,
+				&t2, sizeof(ulong), "rt_rq tg",
+				FAULT_ON_ERROR);
+
+			tg_array = (ulong *)GETBUF(delta * sizeof(ulong));
+			tmp = t2;
+			for (j = 0; j < delta; j++) {
+				readmem(tmp + OFFSET(task_group_parent), KVADDR,
+					&p2, sizeof(ulong), "task_group parent",
+					FAULT_ON_ERROR);
+				tg_array[j] = tmp = p2;
+			}
+
+			if (t1 == p2) {
+				for (j = delta - 1; j > 0; j--) {
+					INDENT(-1 + 6 * (depth + delta - j));
+					readmem(tg_array[j - 1] + OFFSET(task_group_rt_rq),
+						KVADDR, &tmp, sizeof(ulong),
+						"task_group rt_rq", FAULT_ON_ERROR);
+					readmem(tmp + sizeof(ulong) * cpu, KVADDR,
+						&rt_rq, sizeof(ulong),
+						"task_group rt_rq", FAULT_ON_ERROR);
+					readmem(rt_rq + OFFSET(rt_rq_highest_prio_curr),
+						KVADDR, &prio, sizeof(ulong),
+						"rt_rq highest_prio curr", FAULT_ON_ERROR);
+					fprintf(fp, "[%3d] ", prio);
+					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
+						rt_rq + OFFSET(rt_rq_active));
+					dump_task_group_name(tg_array[j - 1]);
+					fprintf(fp, "(DEQUEUED)\n");
+				}
+				throttled_rt_rq_array[c].depth = -1;
+				prio = throttled_rt_rq_array[c].prio;
+				rt_rq = throttled_rt_rq_array[c].rq;
+				rt_rq_buf = GETBUF(SIZE(rt_rq));
+				readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq),
+					"rt_rq", FAULT_ON_ERROR);
+				INDENT(-1 + 6 * (depth + delta));
+				fprintf(fp, "[%3d] ", prio);
+				fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
+					rt_rq + OFFSET(rt_rq_active));
+				dump_task_group_name(t2);
+				fprintf(fp, "(THROTTLED)\n");
+				tot++;
+				dump_RT_prio_array(depth + delta,
+					rt_rq + OFFSET(rt_rq_active),
+					&rt_rq_buf[OFFSET(rt_rq_active)],
+					cpu, 1);
+				FREEBUF(rt_rq_buf);
+			}
+			FREEBUF(tg_array);
+		}
+	}
+
 	if (!tot) {
 		INDENT(5 + 9 * depth);
 		fprintf(fp, "[no tasks queued]\n");	
-- 
1.7.1

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/crash-utility
