Re: [PATCH] Add -r option to timer

Hello Dave,

I have made the changes according to your suggestions, but I need to add
some explanation here.

change 1:

> Perhaps there could be a way to pre-verify the addresses with
> accessible(), and if the address is bogus, display an error message,
> but allow the command to continue on with the other cpus?

The error comes from rb_last(), which was used to calculate the column
width for printing the expires values. I fixed rb_last() by pre-verifying
the addresses there, but rb_last() is no longer used, because change 2
already finds the last hrtimer.

change 2:

> Maybe you can use hq_open()/hq_enter()/hq_close() on the hrtimer addresses
> to prevent this from happening, warn the user when it does, and continue on
> with the next cpu?

Done as you suggested; a minimal sketch of the pattern is below.
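
For reference, here is a minimal sketch of that pattern (not the exact
patch code). It only uses crash helpers that already appear in the patch
(hq_open()/hq_enter()/hq_close(), retrieve_list(), GETBUF(), error());
first_rb_node() and next_rb_node() are hypothetical placeholders for the
actual rb-tree walk done in dump_active_timers() in patch 2/2 below, and
base stands for the clock base being dumped:

	/*
	 * Sketch only: push every rb_node address through the hash
	 * queue, so a corrupted tree that loops back on itself is
	 * reported once instead of being walked forever.
	 */
	struct rb_node *curr;
	ulong *timer_list;
	int timer_cnt = 0;

	hq_open();
	for (curr = first_rb_node(base); curr; curr = next_rb_node(curr)) {
		if (!hq_enter((ulong)curr)) {	/* duplicate => corrupt tree */
			error(INFO, "duplicate rb_node: %lx\n", (ulong)curr);
			break;			/* warn, then move on to the next cpu */
		}
		timer_cnt++;
	}
	if (timer_cnt) {
		timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong));
		timer_cnt = retrieve_list(timer_list, timer_cnt);
	}
	hq_close();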

change 3:

While doing a snapshot test, I found that an rb_node's data may be
destroyed, so I also added accessible() checks to the other rbtree
manipulation functions to keep the command from aborting. A sketch of
the guard is below.
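
And a minimal sketch of the accessible() guard that change 3 adds to the
rbtree helpers in tools.c (the real checks are in rb_next()/rb_last() in
patch 1/2 below); node is whatever rb_node pointer is about to be read:

	struct rb_node nloc;

	/*
	 * Sketch only: validate the address before readmem(), so a
	 * destroyed node ends the walk gracefully instead of aborting
	 * the whole command via FAULT_ON_ERROR.
	 */
	if (!accessible((ulong)node))
		return NULL;

	readmem((ulong)node, KVADDR, &nloc, sizeof(struct rb_node),
		"rb_node", FAULT_ON_ERROR);
	/* ...then follow nloc.rb_left / nloc.rb_right as before... */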

--
Regards
Qiao Nuohan


From 2c3d65962715a94af15b090f96f137730396a864 Mon Sep 17 00:00:00 2001
From: qiaonuohan <qiaonuohan@xxxxxxxxxxxxxx>
Date: Wed, 27 Feb 2013 17:34:11 +0800
Subject: [PATCH 2/2] hrtimer: add -r option to command timer

The "timer -r" command displays all hrtimers and their related
information.

e.g.
crash> timer -r
UPTIME: 776348196(1000HZ)

cpu: 0
 clock: 0
  .base:        ffff880002210f48
  .offset:      1361070676510533252
  .get_time:    ktime_get_real
                EXPIRES                      HRTIMER           FUNCTION
1361847061015817487-1361847061015867487  ffff88004bb43d18  ffffffff81094d50  <hrtimer_wakeup>
1361847073001411000-1361847073001461000  ffff88004b1d3d18  ffffffff81094d50  <hrtimer_wakeup>

 clock: 1
  .base:        ffff880002210f88
  .offset:      0
  .get_time:    ktime_get
            EXPIRES                  HRTIMER           FUNCTION
776348200000000-776348200000000  ffff880002211040  ffffffff810a0b70  <tick_sched_timer>
776352046999849-776352046999849  ffff880002211260  ffffffff810d8ab0  <watchdog_timer_fn>
...
---
 defs.h   |   22 ++++
 help.c   |   65 ++++++++++-
 kernel.c |  356 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 435 insertions(+), 8 deletions(-)

diff --git a/defs.h b/defs.h
index 2993f2b..66e75e4 100755
--- a/defs.h
+++ b/defs.h
@@ -1848,6 +1848,26 @@ struct offset_table {                    /* stash of commonly-used offsets */
 	long vmap_area_list;
 	long vmap_area_flags;
 	long vmap_area_vm;
+	long hrtimer_cpu_base_clock_base;
+	long hrtimer_clock_base_offset;
+	long hrtimer_clock_base_active;
+	long hrtimer_clock_base_first;
+	long hrtimer_clock_base_get_time;
+	long hrtimer_base_active;
+	long hrtimer_base_first;
+	long hrtimer_base_pending;
+	long hrtimer_base_get_time;
+	long hrtimer_node;
+	long hrtimer_list;
+	long hrtimer_softexpires;
+	long hrtimer_expires;
+	long hrtimer_function;
+	long timerqueue_head_next;
+	long timerqueue_node_expires;
+	long timerqueue_node_node;
+	long ktime_t_tv64;
+	long ktime_t_sec;
+	long ktime_t_nsec;
 };
 
 struct size_table {         /* stash of commonly-used sizes */
@@ -1985,6 +2005,8 @@ struct size_table {         /* stash of commonly-used sizes */
 	long rt_rq;
 	long task_group;
 	long vmap_area;
+	long hrtimer_clock_base;
+	long hrtimer_base;
 };
 
 struct array_table {
diff --git a/help.c b/help.c
index c542743..9587de6 100755
--- a/help.c
+++ b/help.c
@@ -2154,13 +2154,17 @@ NULL
 
 char *help_timer[] = {
 "timer",
-"timer queue data",
-" ",
-"  This command displays the timer queue entries, both old- and new-style,",
-"  in chronological order.  In the case of the old-style timers, the",
-"  timer_table array index is shown; in the case of the new-style timers, ",
-"  the timer_list address is shown.  On later kernels, the timer data is",
-"  per-cpu.",
+"timer data",
+"[-r]"
+" ",
+"  This command provides information of timer. With no arguments, the command",
+"  displays the timer queue entries, both old- and new-style, in chronological",
+"  order. In the case of the old-style timers, the timer_table array index is",
+"  shown; in the case of the new-style timers, the timer_list address is",
+"  shown. On later kernels, the timer data is per-cpu.",
+"  ",
+"  -r  shows all hrtimers and their related information. On earlier kernels, ",
+"      expire is an integer, and on later kernels, expire is a range.",
 "\nEXAMPLES",
 "    %s> timer",
 "    JIFFIES",
@@ -2218,6 +2222,55 @@ char *help_timer[] = {
 "    18277900   ceebaec0   c01232bb  <process_timeout>",
 "    18283769   cf739f64   c01232bb  <process_timeout>",
 "    18331902   cee8af64   c01232bb  <process_timeout>",
+" ",
+"  Display hrtimers on a 2-cpu system:\n",
+"    %s> timer -r",
+"    UPTIME: 773504381(1000HZ)",
+"    ",
+"    cpu: 0",
+"     clock: 0",
+"      .base:        ffff880002210f48",
+"      .offset:      1361070676510533252",
+"      .get_time:    ktime_get_real",
+"     EXPIRES      HRTIMER           FUNCTION",
+"    (empty)",
+"    ",
+"     clock: 1",
+"      .base:        ffff880002210f88",
+"      .offset:      0",
+"      .get_time:    ktime_get",
+"                EXPIRES                  HRTIMER           FUNCTION",
+"    773504385000000-773504385000000  ffff880002211040  ffffffff810a0b70  <tick_sched_timer>",
+"    773504593280596-773504623280594  ffff88004b951a68  ffffffff81094d50  <hrtimer_wakeup>",
+"    773508046999849-773508046999849  ffff880002211260  ffffffff810d8ab0  <watchdog_timer_fn>",
+"    774033561838113-774033561888113  ffff88004a889ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    777636126762107-777636126812107  ffff88004b101ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    777643135448820-777643135498820  ffff88004be13ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    835425492707207-835425592707207  ffff8800370ad918  ffffffff81094d50  <hrtimer_wakeup>",
+"    ",
+"    cpu: 1",
+"     clock: 0",
+"      .base:        ffff880002310f48",
+"      .offset:      1361070676510533252",
+"      .get_time:    ktime_get_real",
+"                    EXPIRES                      HRTIMER           FUNCTION",
+"    1361844180919594677-1361844180919644677  ffff88004b3c3d18  ffffffff81094d50  <hrtimer_wakeup>",
+"    1361844222824734000-1361844222824784000  ffff88004b1d3d18  ffffffff81094d50  <hrtimer_wakeup>",
+"    ",
+"     clock: 1",
+"      .base:        ffff880002310f88",
+"      .offset:      0",
+"      .get_time:    ktime_get",
+"                EXPIRES                  HRTIMER           FUNCTION",
+"    773504385250000-773504385250000  ffff880002311040  ffffffff810a0b70  <tick_sched_timer>",
+"    773505214823336-773505214873336  ffff88004ba83ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    773507489467713-773507489517713  ffff880037ba3ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    773508047999849-773508047999849  ffff880002311260  ffffffff810d8ab0  <watchdog_timer_fn>",
+"    773519489700296-773519519670295  ffff88003759ba68  ffffffff81094d50  <hrtimer_wakeup>",
+"    773556240968656-773556241018656  ffff88004ab51ea8  ffffffff81094d50  <hrtimer_wakeup>",
+"    773596813474406-773596813474406  ffff880037485468  ffffffff8106f8b0  <it_real_fn>",
+"    773803814242633-773803814242633  ffff88004be8b868  ffffffff8106f8b0  <it_real_fn>",
+"    779470814225727-779470814225727  ffff880037482868  ffffffff8106f8b0  <it_real_fn>",
 NULL               
 };
 
diff --git a/kernel.c b/kernel.c
index c420518..3cb9569 100755
--- a/kernel.c
+++ b/kernel.c
@@ -34,6 +34,13 @@ static void display_bh_1(void);
 static void display_bh_2(void);
 static void display_bh_3(void);
 static void display_bh_4(void);
+static void dump_hrtimer_data(void);
+static void dump_hrtimer_clock_base(const void *, const int);
+static void dump_hrtimer_base(const void *, const int);
+static void dump_active_timers(const void *);
+static int get_expires_len(const int, const ulong *);
+static void print_timer(const void *);
+static ulonglong ktime_to_ns(const void *);
 static void dump_timer_data(void);
 static void dump_timer_data_tvec_bases_v1(void);
 static void dump_timer_data_tvec_bases_v2(void);
@@ -638,6 +645,51 @@ kernel_init()
 	BUG_bytes_init();
 	
 	kt->flags &= ~PRE_KERNEL_INIT;
+
+	/*
+	 *  for hrtimer
+	 */
+	MEMBER_OFFSET_INIT(hrtimer_cpu_base_clock_base, "hrtimer_cpu_base",
+		"clock_base");
+	MEMBER_OFFSET_INIT(hrtimer_clock_base_offset, "hrtimer_clock_base",
+		"offset");
+	MEMBER_OFFSET_INIT(hrtimer_clock_base_active, "hrtimer_clock_base",
+		"active");
+	MEMBER_OFFSET_INIT(hrtimer_clock_base_first, "hrtimer_clock_base",
+		"first");
+	MEMBER_OFFSET_INIT(hrtimer_clock_base_get_time, "hrtimer_clock_base",
+		"get_time");
+	MEMBER_OFFSET_INIT(hrtimer_base_active, "hrtimer_base", "active");
+	MEMBER_OFFSET_INIT(hrtimer_base_first, "hrtimer_base", "first");
+	MEMBER_OFFSET_INIT(hrtimer_base_pending, "hrtimer_base", "pending");
+	MEMBER_OFFSET_INIT(hrtimer_base_get_time, "hrtimer_base", "get_time");
+	MEMBER_OFFSET_INIT(hrtimer_node, "hrtimer", "node");
+	MEMBER_OFFSET_INIT(hrtimer_list, "hrtimer", "list");
+	MEMBER_OFFSET_INIT(hrtimer_expires, "hrtimer", "expires");
+	if (INVALID_MEMBER(hrtimer_expires))
+		MEMBER_OFFSET_INIT(hrtimer_expires, "hrtimer", "_expires");
+	if (INVALID_MEMBER(hrtimer_expires)) {
+		MEMBER_OFFSET_INIT(timerqueue_head_next, "timerqueue_head", "next");
+		MEMBER_OFFSET_INIT(timerqueue_node_expires, "timerqueue_node",
+			"expires");
+		MEMBER_OFFSET_INIT(timerqueue_node_node, "timerqueue_node",
+			"node");
+	}
+
+	MEMBER_OFFSET_INIT(hrtimer_softexpires, "hrtimer", "_softexpires");
+	MEMBER_OFFSET_INIT(hrtimer_function, "hrtimer", "function");
+	MEMBER_OFFSET_INIT(ktime_t_tv64, "ktime", "tv64");
+	if (INVALID_MEMBER(ktime_t_tv64))
+		MEMBER_OFFSET_INIT(ktime_t_tv64, "ktime_t", "tv64");
+	MEMBER_OFFSET_INIT(ktime_t_sec, "ktime", "sec");
+	if (INVALID_MEMBER(ktime_t_sec))
+		MEMBER_OFFSET_INIT(ktime_t_sec, "ktime_t", "sec");
+	MEMBER_OFFSET_INIT(ktime_t_nsec, "ktime", "nsec");
+	if (INVALID_MEMBER(ktime_t_nsec))
+		MEMBER_OFFSET_INIT(ktime_t_nsec, "ktime_t", "nsec");
+
+	STRUCT_SIZE_INIT(hrtimer_clock_base, "hrtimer_clock_base");
+	STRUCT_SIZE_INIT(hrtimer_base, "hrtimer_base");
 }
 
 /*
@@ -6333,10 +6385,17 @@ void
 cmd_timer(void)
 {
         int c;
+	int rflag;
 
-        while ((c = getopt(argcnt, args, "")) != EOF) {
+	rflag = 0;
+
+        while ((c = getopt(argcnt, args, "r")) != EOF) {
                 switch(c)
                 {
+		case 'r':
+			rflag = 1;
+			break;
+
                 default:
                         argerrs++;
                         break;
@@ -6346,7 +6405,300 @@ cmd_timer(void)
         if (argerrs)
                 cmd_usage(pc->curcmd, SYNOPSIS);
 
-	dump_timer_data();
+	if (rflag)
+		dump_hrtimer_data();
+	else
+		dump_timer_data();
+}
+
+static void
+dump_hrtimer_data(void)
+{
+	int i, j;
+	ulonglong jiffies_64;
+	int hrtimer_max_clock_bases, max_hrtimer_bases;
+	struct syment * hrtimer_bases;
+
+	hrtimer_max_clock_bases = 0;
+	max_hrtimer_bases = 0;
+
+	/*
+	 * Decide whether hrtimer is available and set
+	 * hrtimer_max_clock_bases or max_hrtimer_bases accordingly.
+	 * If neither is available, hrtimers are not supported.
+	 */
+	if (VALID_SIZE(hrtimer_clock_base)) {
+		hrtimer_max_clock_bases = 2;
+		if (symbol_exists("ktime_get_boottime"))
+			hrtimer_max_clock_bases = 3;
+	} else if (VALID_SIZE(hrtimer_base)) {
+		max_hrtimer_bases = 2;
+	} else
+		command_not_supported();
+
+	/* get current time(uptime) */
+	get_uptime(NULL, &jiffies_64);
+	fprintf(fp, "UPTIME: %lld(%dHZ)\n\n", jiffies_64, machdep->hz);
+
+	hrtimer_bases = per_cpu_symbol_search("hrtimer_bases");
+	for (i = 0; i < kt->cpus; i++) {
+		if (i)
+			fprintf(fp, "\n");
+		fprintf(fp, "cpu: %d\n", i);
+		if (VALID_SIZE(hrtimer_clock_base)) {
+			for (j = 0; j < hrtimer_max_clock_bases; j++) {
+				if (j)
+					fprintf(fp, "\n");
+				dump_hrtimer_clock_base(
+					(void *)(hrtimer_bases->value) +
+					kt->__per_cpu_offset[i], j);
+			}
+		} else {
+			for (j = 0; j < max_hrtimer_bases; j++) {
+				if (j)
+					fprintf(fp, "\n");
+				dump_hrtimer_base(
+					(void *)(hrtimer_bases->value) +
+					kt->__per_cpu_offset[i], j);
+			}
+		}
+	}
+}
+
+static int expires_len = VADDR_PRLEN;
+
+static void
+dump_hrtimer_clock_base(const void *hrtimer_bases, const int num)
+{
+	void *base;
+	ulonglong offset;
+	ulong get_time;
+	char buf[BUFSIZE];
+
+	base = (void *)hrtimer_bases + OFFSET(hrtimer_cpu_base_clock_base) +
+		SIZE(hrtimer_clock_base) * num;
+	fprintf(fp, " clock: %d\n", num);
+	fprintf(fp, "  .base:\t%lx\n", (ulong)base);
+
+	if (VALID_MEMBER(hrtimer_clock_base_offset)) {
+		offset = ktime_to_ns(base + OFFSET(hrtimer_clock_base_offset));
+		fprintf(fp, "  .offset:\t%lld\n", offset);
+	}
+
+	readmem((ulong)(base + OFFSET(hrtimer_clock_base_get_time)), KVADDR,
+		&get_time, sizeof(get_time), "hrtimer_clock_base get_time",
+		FAULT_ON_ERROR);
+
+	fprintf(fp, "  .get_time:\t%s\n", value_to_symstr(get_time, buf, 0));
+
+	dump_active_timers(base);
+}
+
+static void
+dump_hrtimer_base(const void *hrtimer_bases, const int num)
+{
+	void *base;
+	ulong get_time;
+	char buf[BUFSIZE];
+	
+	base = (void *)hrtimer_bases + SIZE(hrtimer_base) * num;
+	fprintf(fp, " clock: %d\n", num);
+	fprintf(fp, "  .base:\t%lx\n", (ulong)base);
+
+	readmem((ulong)(base + OFFSET(hrtimer_base_get_time)), KVADDR,
+		&get_time, sizeof(get_time), "hrtimer_base get_time",
+		FAULT_ON_ERROR);
+
+	fprintf(fp, "  .get_time:\t%s\n", value_to_symstr(get_time, buf, 0));
+
+	dump_active_timers(base);
+}
+
+static void
+dump_active_timers(const void *base)
+{
+	int next, i, t;
+	struct rb_node *curr;
+	int timer_cnt;
+	ulong *timer_list;
+	void  *timer;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+
+
+	next = 0;
+
+	/* search hrtimers */
+	hq_open();
+	timer_cnt = 0;
+next_one:
+	i = 0;
+
+	/* get the first node */
+	if (VALID_MEMBER(hrtimer_base_pending))
+		readmem((ulong)(base + OFFSET(hrtimer_base_pending) -
+			OFFSET(hrtimer_list) + OFFSET(hrtimer_node)),
+			KVADDR, &curr, sizeof(curr), "hrtimer_base pending",
+			FAULT_ON_ERROR);
+	else if (VALID_MEMBER(hrtimer_base_first))
+		readmem((ulong)(base + OFFSET(hrtimer_base_first)),
+			KVADDR, &curr, sizeof(curr), "hrtimer_base first",
+			FAULT_ON_ERROR);
+	else if (VALID_MEMBER(hrtimer_clock_base_first))
+		readmem((ulong)(base + OFFSET(hrtimer_clock_base_first)),
+			KVADDR,	&curr, sizeof(curr), "hrtimer_clock_base first",
+			FAULT_ON_ERROR);
+	else
+		readmem((ulong)(base + OFFSET(hrtimer_clock_base_active) +
+				OFFSET(timerqueue_head_next)),
+			KVADDR, &curr, sizeof(curr), "hrtimer_clock base",
+			FAULT_ON_ERROR);
+
+	while (curr && i < next) {
+		curr = rb_next(curr);
+		i++;
+	}
+
+	if (curr) {
+		if (!hq_enter((ulong)curr)) {
+			error(INFO, "duplicate rb_node: %lx\n", (ulong)curr);
+			hq_close();
+			return;
+		}
+
+		timer_cnt++;
+		next++;
+		goto next_one;
+	}
+
+	if (timer_cnt) {
+		timer_list = (ulong *)GETBUF(timer_cnt * sizeof(long));
+		timer_cnt = retrieve_list(timer_list, timer_cnt);
+	}
+	hq_close();
+
+	/* dump hrtimers */
+	/* print header */
+	expires_len = get_expires_len(timer_cnt, timer_list);
+	if (!expires_len)
+		expires_len = 8;
+
+	fprintf(fp, "%s  %s  %s\n",
+		mkstring(buf1, expires_len, CENTER|RJUST, "EXPIRES"),
+		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "HRTIMER"),
+		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));
+
+	/* print timer */
+	if (!timer_cnt)
+		fprintf(fp, "(empty)\n");
+
+	for (t = 0; t < timer_cnt; t++) {
+		if (VALID_MEMBER(timerqueue_node_node))
+			timer = (void *)(timer_list[t] -
+				OFFSET(timerqueue_node_node) -
+				OFFSET(hrtimer_node));
+		else
+			timer = (void *)(timer_list[t] - OFFSET(hrtimer_node));
+
+		print_timer(timer);
+	}
+}
+
+static int
+get_expires_len(const int timer_cnt, const ulong *timer_list)
+{
+	void *last_timer;
+	char buf[BUFSIZE];
+	ulonglong softexpires, expires;
+	int len;
+
+	len = 0;
+
+	if (!timer_cnt)
+		return 8;
+
+	if (VALID_MEMBER(timerqueue_node_node))
+		last_timer = (void *)(timer_list[timer_cnt - 1] -
+			OFFSET(timerqueue_node_node) -
+			OFFSET(hrtimer_node));
+	else
+		last_timer = (void *)(timer_list[timer_cnt - 1] -
+			OFFSET(hrtimer_node));
+
+	/* expire is range */
+	if (VALID_MEMBER(hrtimer_softexpires)) {
+		softexpires = ktime_to_ns(last_timer + OFFSET(hrtimer_softexpires));
+		sprintf(buf, "%lld", softexpires);
+		len += strlen(buf) + 1;
+	}
+
+	if (VALID_MEMBER(hrtimer_expires))
+		expires = ktime_to_ns(last_timer + OFFSET(hrtimer_expires));
+	else
+		expires = ktime_to_ns(last_timer + OFFSET(hrtimer_node) +
+			OFFSET(timerqueue_node_expires));
+
+	sprintf(buf, "%lld", expires);
+	len += strlen(buf);
+
+	return len;
+}
+
+/*
+ * print hrtimer and its related information
+ */
+static void
+print_timer(const void *timer)
+{
+	ulonglong softexpires, expires;
+	
+	ulong function;
+	char buf1[BUFSIZE];
+	char buf2[BUFSIZE];
+	char buf3[BUFSIZE];
+
+	if (VALID_MEMBER(hrtimer_expires))
+		expires = ktime_to_ns(timer + OFFSET(hrtimer_expires));
+	else
+		expires = ktime_to_ns(timer + OFFSET(hrtimer_node) +
+			OFFSET(timerqueue_node_expires));
+
+	if (VALID_MEMBER(hrtimer_softexpires)) {
+		softexpires = ktime_to_ns(timer + OFFSET(hrtimer_softexpires));
+		sprintf(buf1, "%lld-%lld", softexpires, expires);
+	} else {
+		sprintf(buf1, "%lld", expires);
+	}
+	fprintf(fp, "%s  ", mkstring(buf2, expires_len, CENTER|RJUST, buf1));
+
+	fprintf(fp, "%lx  ", (ulong)timer);
+
+	readmem((ulong)(timer + OFFSET(hrtimer_function)), KVADDR, &function,
+		sizeof(function), "hrtimer function", FAULT_ON_ERROR);
+	fprintf(fp, "%lx  ", function);
+	fprintf(fp, "<%s>\n", value_to_symstr(function, buf3, 0));
+}
+
+/*
+ * convert ktime to ns, only need the address of ktime
+ */
+static ulonglong
+ktime_to_ns(const void *ktime)
+{
+	ulonglong ns;
+
+	if (VALID_MEMBER(ktime_t_tv64))
+		readmem((ulong)ktime + OFFSET(ktime_t_tv64), KVADDR, &ns,
+			sizeof(ns), "ktime_t tv64", FAULT_ON_ERROR);
+	else {
+		uint32_t sec, nsec;
+		readmem((ulong)ktime + OFFSET(ktime_t_sec), KVADDR, &sec,
+			sizeof(sec), "ktime_t sec", FAULT_ON_ERROR);
+		readmem((ulong)ktime + OFFSET(ktime_t_nsec), KVADDR, &nsec,
+			sizeof(nsec), "ktime_t nsec", FAULT_ON_ERROR);
+		ns = (ulonglong)sec * 1000000000ULL + nsec;
+	}
+
+	return ns;
 }
 
 /*
-- 
1.7.1

From 95cddfa3a5f10f23ee5a825a5586aad82bb87e7b Mon Sep 17 00:00:00 2001
From: qiaonuohan <qiaonuohan@xxxxxxxxxxxxxx>
Date: Wed, 27 Feb 2013 16:56:11 +0800
Subject: [PATCH 1/2] make rbtree manipulation functions global

The rbtree manipulation functions were previously used only by the runq
command. Make them global so that they are available to other commands.
---
 defs.h  |   20 ++++++++++
 task.c  |   94 ----------------------------------------------
 tools.c |  128 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 148 insertions(+), 94 deletions(-)

diff --git a/defs.h b/defs.h
index 1f693c3..2993f2b 100755
--- a/defs.h
+++ b/defs.h
@@ -2256,6 +2256,20 @@ struct alias_data {                 /* command alias storage */
 	char argbuf[1];
 };
 
+struct rb_node
+{
+        unsigned long  rb_parent_color;
+#define RB_RED          0
+#define RB_BLACK        1
+        struct rb_node *rb_right;
+        struct rb_node *rb_left;
+};
+
+struct rb_root
+{
+        struct rb_node *rb_node;
+};
+
 #define NUMBER_STACKFRAMES 4
 
 #define SAVE_RETURN_ADDRESS(retaddr) \
@@ -4082,6 +4096,12 @@ uint16_t swap16(uint16_t, int);
 uint32_t swap32(uint32_t, int);
 int make_cpumask(char *, ulong *, int, int *);
 size_t strlcpy(char *, char *, size_t);
+struct rb_node *rb_first(struct rb_root *);
+struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
+struct rb_node *rb_right(struct rb_node *, struct rb_node *);
+struct rb_node *rb_left(struct rb_node *, struct rb_node *);
+struct rb_node *rb_next(struct rb_node *);
+struct rb_node *rb_last(struct rb_root *);
 
 /* 
  *  symbols.c 
diff --git a/task.c b/task.c
index fec9f6a..6fb2b4e 100755
--- a/task.c
+++ b/task.c
@@ -56,13 +56,6 @@ static void dump_runq(void);
 static void dump_on_rq_timestamp(void);
 static void dump_runqueues(void);
 static void dump_prio_array(int, ulong, char *);
-struct rb_root;
-static struct rb_node *rb_first(struct rb_root *);
-struct rb_node;
-static struct rb_node *rb_next(struct rb_node *);
-static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
-static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
-static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
 static void dump_task_runq_entry(struct task_context *);
 static void print_group_header_fair(int, ulong, void *);
 static void print_parent_task_group_fair(void *, int);
@@ -7529,93 +7522,6 @@ dump_prio_array(int which, ulong k_prio_array, char *u_prio_array)
 	}
 }
 
-/*
- *  CFS scheduler uses Red-Black trees to maintain run queue.
- */
-struct rb_node
-{
-        unsigned long  rb_parent_color;
-#define RB_RED          0
-#define RB_BLACK        1
-        struct rb_node *rb_right;
-        struct rb_node *rb_left;
-};
-
-struct rb_root
-{
-        struct rb_node *rb_node;
-};
-
-static struct rb_node *
-rb_first(struct rb_root *root)
-{
-        struct rb_root rloc;
-        struct rb_node *n;
-	struct rb_node nloc;
-
-	readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), 
-		"rb_root", FAULT_ON_ERROR);
-
-        n = rloc.rb_node;
-        if (!n)
-                return NULL;
-        while (rb_left(n, &nloc))
-		n = nloc.rb_left;
-
-        return n;
-}
-
-static struct rb_node *
-rb_parent(struct rb_node *node, struct rb_node *nloc)
-{
-	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
-		"rb_node", FAULT_ON_ERROR);
-
-	return (struct rb_node *)(nloc->rb_parent_color & ~3);
-}
-
-static struct rb_node *
-rb_right(struct rb_node *node, struct rb_node *nloc)
-{
-	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
-		"rb_node", FAULT_ON_ERROR);
-
-	return nloc->rb_right;
-}
-
-static struct rb_node *
-rb_left(struct rb_node *node, struct rb_node *nloc)
-{
-	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
-		"rb_node", FAULT_ON_ERROR);
-
-	return nloc->rb_left;
-}
-
-static struct rb_node *
-rb_next(struct rb_node *node)
-{
-	struct rb_node nloc;
-        struct rb_node *parent;
-
-	parent = rb_parent(node, &nloc);
-
-	if (parent == node)
-		return NULL;
-
-        if (nloc.rb_right) {
-		node = nloc.rb_right;
-		while (rb_left(node, &nloc))
-			node = nloc.rb_left;
-		return node;
-	}
-
-        while ((parent = rb_parent(node, &nloc)) && (node == rb_right(parent, &nloc)))
-                node = parent;
-
-        return parent;
-}
-
 #define MAX_GROUP_NUM 200
 struct task_group_info {
 	int use;
diff --git a/tools.c b/tools.c
index d8dd04a..066f7fa 100755
--- a/tools.c
+++ b/tools.c
@@ -5555,3 +5555,131 @@ strlcpy(char *dest, char *src, size_t size)
 	return ret;
 }
 
+struct rb_node *
+rb_first(struct rb_root *root)
+{
+        struct rb_root rloc;
+        struct rb_node *n;
+	struct rb_node nloc;
+
+	readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), 
+		"rb_root", FAULT_ON_ERROR);
+
+        n = rloc.rb_node;
+        if (!n)
+                return NULL;
+        while (rb_left(n, &nloc))
+		n = nloc.rb_left;
+
+        return n;
+}
+
+struct rb_node *
+rb_parent(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
+
+	return (struct rb_node *)(nloc->rb_parent_color & ~3);
+}
+
+struct rb_node *
+rb_right(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
+
+	return nloc->rb_right;
+}
+
+struct rb_node *
+rb_left(struct rb_node *node, struct rb_node *nloc)
+{
+	readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), 
+		"rb_node", FAULT_ON_ERROR);
+
+	return nloc->rb_left;
+}
+
+struct rb_node *
+rb_next(struct rb_node *node)
+{
+	struct rb_node nloc;
+        struct rb_node *parent;
+
+	/* node is destroyed */
+	if (!accessible((ulong)node))
+		return NULL;
+
+	parent = rb_parent(node, &nloc);
+
+	if (parent == node)
+		return NULL;
+
+        if (nloc.rb_right) {
+		/* rb_right is destroyed */
+		if (!accessible((ulong)nloc.rb_right))
+			return NULL;
+
+		node = nloc.rb_right;
+		while (rb_left(node, &nloc)) {
+			/* rb_left is destroyed */
+			if (!accessible((ulong)nloc.rb_left))
+				return NULL;
+			node = nloc.rb_left;
+		}
+		return node;
+	}
+
+	while ((parent = rb_parent(node, &nloc))) {
+		/* parent is destroyed */
+                if (!accessible((ulong)parent))
+                        return NULL;
+
+
+		if (node != rb_right(parent, &nloc))
+			break;
+
+		node = parent;
+	}
+
+        return parent;
+}
+
+struct rb_node *
+rb_last(struct rb_root *root)
+{
+	struct rb_node *node;
+	struct rb_node nloc;
+
+	/* meet destroyed data */
+	if (!accessible((ulong)root + OFFSET(rb_root_rb_node)))
+		return NULL;
+
+	readmem((ulong)root + OFFSET(rb_root_rb_node), KVADDR, &node,
+		sizeof(node), "rb_root node", FAULT_ON_ERROR);
+
+	while (1) {
+		if (!node)
+			break;
+
+		/* meet destroyed data */
+		if (!accessible((ulong)node))
+			return NULL;
+
+		readmem((ulong)node, KVADDR, &nloc, sizeof(struct rb_node),
+		"rb_node last", FAULT_ON_ERROR);
+
+		/*  meet the last one  */
+		if (!nloc.rb_right)
+			break;
+
+		/* meet destroyed data */
+		if (!accessible((ulong)nloc.rb_right))
+			break;
+
+		node = nloc.rb_right;
+	}
+
+	return node;
+}
-- 
1.7.1

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/crash-utility
