Hello Dave,

This patch improves ps performance. On RHEL 7, with 100k processes
created, the original ps command takes 51873.6s; after applying my
patch, the time is reduced to 10.2s.
From 0095be2c68b4154110b348ba5cbbe7dcb36c746d Mon Sep 17 00:00:00 2001
From: panfengyun <panfy.fnst@xxxxxxxxxxxxxx>
Date: Mon, 18 Aug 2014 14:59:43 +0800
Subject: [PATCH] improve ps performance
When a core file contains numerous tasks, especially on RHEL 7, the ps
command spends too much time displaying them. The following three areas
were changed to improve performance:
1. the task_mm() and is_kernel_thread() functions were being called
repeatedly for the same task.
2. the task_to_pid() and task_to_context() functions contained
unnecessary loops over all tasks.
3. in get_task_mem_usage(), the MM_FILEPAGES and MM_ANONPAGES values
were looked up repeatedly, and most of the time was spent in the loop
over all tasks.
Please check the patch for the detailed changes; minimal sketches of the
two main ideas appear below.
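The first and third items boil down to memoizing values in the per-task
context instead of recomputing them for every task. Here is a minimal,
standalone sketch of that sentinel-based caching pattern; struct ctx,
expensive_task_mm() and cached_task_mm() are placeholder names for
illustration, not crash's real structures or APIs:
<cut>
#include <stdio.h>

#define NOT_SET (-1L)

/* Placeholder for a per-task context; only the cached field matters here. */
struct ctx {
    unsigned long task;
    long cached_mm;              /* NOT_SET until the first lookup */
};

/* Stand-in for the expensive call that reads data from the dump. */
static long expensive_task_mm(unsigned long task)
{
    printf("reading mm_struct for task %lx\n", task);
    return (long)(task + 0x100);
}

/* Compute on the first call, then answer from the cache. */
static long cached_task_mm(struct ctx *tc)
{
    if (tc->cached_mm == NOT_SET)
        tc->cached_mm = expensive_task_mm(tc->task);
    return tc->cached_mm;
}

int main(void)
{
    struct ctx tc = { .task = 0xffff880012345678UL, .cached_mm = NOT_SET };

    cached_task_mm(&tc);         /* triggers the expensive read once */
    cached_task_mm(&tc);         /* answered from tc->cached_mm */
    return 0;
}
<cut>
In the patch the same idea is applied to task_mm(), is_kernel_thread()
and the MM_FILEPAGES/MM_ANONPAGES enumerator values.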
I tested the patch on two operating systems; the results are below (the
following two scripts were used to create the processes):
1. on RHEL 6, using "./test2.sh 10 10000" to create 100k processes, the
original ps command takes 179.9s; after applying my patch, the time is
reduced to 9.5s.
2. on RHEL 7, using "./test2.sh 100 1000" to create 100k processes, the
original ps command takes 51873.6s; after applying my patch, the time is
reduced to 10.2s.
test2.sh:
<cut>
a=$1
while [ $a -ge 0 ]
do
    sh /root/test.sh $2 &
    a=$((a-1))
done
sleep 10000
<cut>
test.sh:
<cut>
a=$1
if [[ $a -eq 0 ]]
then
    echo exit
    exit
fi
a=$((a-1))
sh /root/test.sh $a
sleep 10000
<cut>
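For anyone skimming the diff below: the biggest win comes from
get_task_mem_usage(). When SPLIT_RSS_COUNTING is in effect it no longer
scans every task for each ps line; instead it walks only the contiguous
run of entries sharing the caller's tgid in a tgid-sorted index that is
built once per ps invocation. A minimal standalone sketch of that
sort-then-range-walk idea (struct entry, cmp_tgid() and walk_same_tgid()
are hypothetical names, not taken from the patch):
<cut>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the patch's tgid_task_context entries. */
struct entry {
    unsigned long tgid;
    unsigned long task;
};

static int cmp_tgid(const void *a, const void *b)
{
    const struct entry *x = a, *y = b;

    if (x->tgid < y->tgid)
        return -1;
    if (x->tgid > y->tgid)
        return 1;
    return 0;
}

/*
 * Visit only the contiguous run of entries whose tgid matches the
 * entry at 'index' in a tgid-sorted array of 'n' entries.
 */
static void walk_same_tgid(struct entry *arr, int n, int index)
{
    int start = index, end = index;

    while (start > 0 && arr[start - 1].tgid == arr[index].tgid)
        start--;
    while (end < n - 1 && arr[end + 1].tgid == arr[index].tgid)
        end++;

    for (int i = start; i <= end; i++)
        printf("task %lx (tgid %lu)\n", arr[i].task, arr[i].tgid);
}

int main(void)
{
    struct entry arr[] = {
        { 300, 0x3001 }, { 100, 0x1001 }, { 100, 0x1002 },
        { 200, 0x2001 }, { 100, 0x1003 },
    };
    int n = sizeof(arr) / sizeof(arr[0]);

    qsort(arr, n, sizeof(arr[0]), cmp_tgid);   /* sort by tgid once */
    walk_same_tgid(arr, n, 1);                 /* visits the three tgid-100 entries */
    return 0;
}
<cut>
The patch does the one-time sort with a heap sort over tt->ttc_array and
records each task's position in tc->tgid_task_context_index, so only the
start/end expansion above remains in the hot path.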
---
crash-7.0.7/defs.h | 19 ++++-
crash-7.0.7/memory.c | 59 ++++++++----
crash-7.0.7/task.c | 256 +++++++++++++++++++++++++++++++++++++++++++++++---
3 files changed, 302 insertions(+), 32 deletions(-)
diff --git a/crash-7.0.7/defs.h b/crash-7.0.7/defs.h
index 44df6ae..f39305a 100755
--- a/crash-7.0.7/defs.h
+++ b/crash-7.0.7/defs.h
@@ -758,13 +758,25 @@ struct task_context { /* context stored for each task */
int processor;
ulong ptask;
ulong mm_struct;
+ int is_kernel_thread;
+ ulong task_mm;
+ ulong tgid;
+ ulong tgid_task_context_index;
struct task_context *tc_next;
};
+struct tgid_task_context { /* context and tgid stored for each task */
+ ulong tgid;
+ struct task_context *tc;
+};
+
struct task_table { /* kernel/local task table data */
struct task_context *current;
struct task_context *context_array;
+ struct tgid_task_context *ttc_array;
void (*refresh_task_table)(void);
+ long filepages;
+ long anonpages;
ulong flags;
ulong task_start;
ulong task_end;
@@ -4606,7 +4618,7 @@ void dump_vma_cache(ulong);
int is_page_ptr(ulong, physaddr_t *);
void dump_vm_table(int);
int read_string(ulong, char *, int);
-void get_task_mem_usage(ulong, struct task_mem_usage *);
+void get_task_mem_usage(ulong, struct task_mem_usage *, struct task_context *);
char *get_memory_size(char *);
uint64_t generic_memory_size(void);
char *swap_location(ulonglong, char *);
@@ -4788,6 +4800,11 @@ ulong pid_to_task(ulong);
ulong task_to_pid(ulong);
int task_exists(ulong);
int is_kernel_thread(ulong);
+#define NOT_SET (-1)
+int is_kernel_thread_tc(struct task_context *);
+void set_pages(void);
+void set_ttc_array(void);
+void free_ttc_array(void);
int is_idle_thread(ulong);
void get_idle_threads(ulong *, int);
char *task_state_string(ulong, char *, int);
diff --git a/crash-7.0.7/memory.c b/crash-7.0.7/memory.c
index c97dd39..2fc8880 100755
--- a/crash-7.0.7/memory.c
+++ b/crash-7.0.7/memory.c
@@ -3661,7 +3661,12 @@ vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref)
tc = task_to_context(task);
tm = &task_mem_usage;
- get_task_mem_usage(task, tm);
+ tc->task_mm = NOT_SET;
+ tc->is_kernel_thread = NOT_SET;
+ set_pages();
+ set_ttc_array();
+ get_task_mem_usage(task, tm, tc);
+ free_ttc_array();
single_vma = 0;
single_vma_found = FALSE;
@@ -4076,9 +4081,8 @@ in_user_stack(ulong task, ulong vaddr)
* percent of physical memory being used, and the mm_struct address.
*/
void
-get_task_mem_usage(ulong task, struct task_mem_usage *tm)
+get_task_mem_usage(ulong task, struct task_mem_usage *tm, struct task_context *tc)
{
- struct task_context *tc;
long rss = 0;
BZERO(tm, sizeof(struct task_mem_usage));
@@ -4086,14 +4090,14 @@ get_task_mem_usage(ulong task, struct task_mem_usage *tm)
if (IS_ZOMBIE(task) || IS_EXITING(task))
return;
- tc = task_to_context(task);
-
if (!tc || !tc->mm_struct) /* probably a kernel thread */
return;
tm->mm_struct_addr = tc->mm_struct;
- if (!task_mm(task, TRUE))
+ if (NOT_SET == tc->task_mm)
+ tc->task_mm = task_mm(task, TRUE);
+ if (!(tc->task_mm))
return;
if (VALID_MEMBER(mm_struct_rss))
@@ -4108,11 +4112,8 @@ get_task_mem_usage(ulong task, struct task_mem_usage *tm)
if (VALID_MEMBER(mm_struct_rss_stat)) {
long anonpages, filepages;
- if (!enumerator_value("MM_FILEPAGES", &filepages) ||
- !enumerator_value("MM_ANONPAGES", &anonpages)) {
- filepages = 0;
- anonpages = 1;
- }
+ anonpages = tt->anonpages;
+ filepages = tt->filepages;
rss += LONG(tt->mm_struct +
OFFSET(mm_struct_rss_stat) +
OFFSET(mm_rss_stat_count) +
@@ -4125,16 +4126,31 @@ get_task_mem_usage(ulong task, struct task_mem_usage *tm)
/* Check whether SPLIT_RSS_COUNTING is enabled */
if (VALID_MEMBER(task_struct_rss_stat)) {
- int i, sync_rss;
+ int sync_rss;
ulong tgid;
struct task_context *tc1;
+ struct tgid_task_context *ttc_array, *ttc, *start, *end;
- tgid = task_tgid(task);
-
- tc1 = FIRST_CONTEXT();
- for (i = 0; i < RUNNING_TASKS(); i++, tc1++) {
- if (task_tgid(tc1->task) != tgid)
- continue;
+ ttc_array = tt->ttc_array;
+ tgid = tc->tgid;
+ ttc = ttc_array + tc->tgid_task_context_index;
+ start = ttc;
+ if (tc->tgid_task_context_index > 0)
+ {
+ while ((start > ttc_array) &&
+ ((start -1 )->tgid == start->tgid))
+ start--;
+ }
+ end = ttc;
+ if (tc->tgid_task_context_index < RUNNING_TASKS())
+ {
+ while ((end < (ttc_array + (RUNNING_TASKS() - 1))) &&
+ (end->tgid == (end + 1)->tgid))
+ end++;
+ }
+ for (;start <= end;)
+ {
+ tc1 = start->tc;
/* count 0 -> filepages */
if (!readmem(tc1->task +
@@ -4160,6 +4176,9 @@ get_task_mem_usage(ulong task, struct task_mem_usage *tm)
continue;
rss += sync_rss;
+ if(start == (ttc_array + (RUNNING_TASKS() - 1)))
+ break;
+ start++;
}
}
@@ -4176,7 +4195,9 @@ get_task_mem_usage(ulong task, struct task_mem_usage *tm)
tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd));
- if (is_kernel_thread(task))
+ if (NOT_SET == tc->is_kernel_thread)
+ tc->is_kernel_thread = is_kernel_thread_tc(tc);
+ if (tc->is_kernel_thread)
return;
tm->pct_physmem = ((double)(tm->rss*100)) /
diff --git a/crash-7.0.7/task.c b/crash-7.0.7/task.c
index 75b1964..2be3992 100755
--- a/crash-7.0.7/task.c
+++ b/crash-7.0.7/task.c
@@ -2281,6 +2281,7 @@ store_context(struct task_context *tc, ulong task, char *tp)
int *processor_addr;
ulong *parent_addr;
ulong *mm_addr;
+ ulong *tgid;
int has_cpu;
int do_verify;
@@ -2320,6 +2321,7 @@ store_context(struct task_context *tc, ulong task, char *tp)
else
parent_addr = (ulong *)(tp + OFFSET(task_struct_parent));
mm_addr = (ulong *)(tp + OFFSET(task_struct_mm));
+ tgid = (ulong *)(tp + OFFSET(task_struct_tgid));
has_cpu = task_has_cpu(task, tp);
tc->pid = (ulong)(*pid_addr);
@@ -2327,6 +2329,7 @@ store_context(struct task_context *tc, ulong task, char *tp)
tc->processor = *processor_addr;
tc->ptask = *parent_addr;
tc->mm_struct = *mm_addr;
+ tc->tgid = *tgid;
tc->task = task;
tc->tc_next = NULL;
@@ -2924,6 +2927,7 @@ cmd_ps(void)
if (!args[optind]) {
show_ps(PS_SHOW_ALL|flag, &psinfo);
+
return;
}
@@ -3044,6 +3048,8 @@ show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi)
char buf3[BUFSIZE];
ulong tgid;
+ tc->task_mm = NOT_SET;
+ tc->is_kernel_thread = NOT_SET;
if ((flag & PS_USER) && is_kernel_thread(tc->task))
return;
if ((flag & PS_KERNEL) && !is_kernel_thread(tc->task))
@@ -3102,7 +3108,7 @@ show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi)
}
tm = &task_mem_usage;
- get_task_mem_usage(tc->task, tm);
+ get_task_mem_usage(tc->task, tm, tc);
fprintf(fp, "%s", is_task_active(tc->task) ? "> " : " ");
fprintf(fp, "%5ld %5ld %2s %s %3s",
tc->pid, task_to_pid(tc->ptask),
@@ -3116,12 +3122,168 @@ show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi)
fprintf(fp, "%s ", buf1);
fprintf(fp, "%7ld ", (tm->total_vm * PAGESIZE())/1024);
fprintf(fp, "%6ld ", (tm->rss * PAGESIZE())/1024);
- if (is_kernel_thread(tc->task))
+ if (NOT_SET == tc->is_kernel_thread)
+ tc->is_kernel_thread = is_kernel_thread_tc(tc);
+ if (tc->is_kernel_thread)
fprintf(fp, "[%s]\n", tc->comm);
else
fprintf(fp, "%s\n", tc->comm);
}
+/*
+ * Adjust the heap by sifting down the node at the given index.
+ */
+static void
+HeapAdjust(struct tgid_task_context *ttc, int index, int length)
+{
+ struct tgid_task_context *temp_ttc;
+ int child;
+
+ temp_ttc = (struct tgid_task_context *)malloc(sizeof(struct tgid_task_context));
+ temp_ttc->tc = NULL;
+
+ temp_ttc->tgid = (ttc + index)->tgid;
+ temp_ttc->tc = (ttc + index)->tc;
+ child = 2 * index + 1;
+ while (child < length)
+ {
+ if ((child < (length - 1)) &&
+ ((ttc + child)->tgid < (ttc + child + 1)->tgid))
+ {
+ ++child;
+ }
+
+ if (temp_ttc->tgid < (ttc + child)->tgid)
+ {
+ (ttc + index)->tgid = (ttc + child)->tgid;
+ (ttc + index)->tc = (ttc + child)->tc;
+ index = child;
+ child = 2 * index + 1;
+ } else {
+ break;
+ }
+ (ttc + index)->tgid = temp_ttc->tgid;
+ (ttc + index)->tc = temp_ttc->tc;
+ }
+ free(temp_ttc);
+}
+
+/*
+ * Initialize the heap; afterwards the largest tgid sits in
+ * the first element.
+ */
+static void
+BuildingHeap(struct tgid_task_context *ttc, int length)
+{
+ int i;
+ for (i = (length - 1) / 2; i >= 0; --i)
+ HeapAdjust(ttc, i, length);
+}
+
+/*
+ * Heap sort algorithm
+ */
+static void
+HeapSort(struct tgid_task_context *ttc, int length)
+{
+ int i;
+ struct tgid_task_context *temp, *first, *last;
+
+ temp = (struct tgid_task_context *)malloc(sizeof(struct tgid_task_context));
+ temp->tc = NULL;
+
+ BuildingHeap(ttc, length);
+ for (i = length - 1; i > 0; --i)
+ {
+ first = ttc;
+ last = ttc + i;
+ temp->tgid = first->tgid;
+ temp->tc = first->tc;
+ first->tgid = last->tgid;
+ first->tc = last->tc;
+ last->tgid = temp->tgid;
+ last->tc = temp->tc;
+
+ HeapAdjust(ttc, 0 , i);
+
+ }
+ free(temp);
+}
+
+void
+set_ttc_array(void)
+{
+ struct tgid_task_context *ttc;
+ struct task_context *tc;
+ int i;
+
+ if (VALID_MEMBER(mm_struct_rss))
+ return;
+ if (VALID_MEMBER(task_struct_rss_stat))
+ {
+ if (!(tt->ttc_array = (struct tgid_task_context *)
+ malloc(RUNNING_TASKS() * sizeof(struct tgid_task_context))))
+ {
+ error(FATAL, "cannot malloc ttc array (%d tasks)",RUNNING_TASKS());
+ return ;
+ }
+ /*init ttc*/
+ tc = FIRST_CONTEXT();
+ ttc = tt->ttc_array;
+ for (i = 0; i < RUNNING_TASKS(); i++, tc++, ttc++)
+ {
+ ttc->tgid = tc->tgid;
+ ttc->tc = tc;
+ }
+ /*sort ttc*/
+ HeapSort(tt->ttc_array, RUNNING_TASKS());
+ /* set each ttc->tc->tgid_task_context_index */
+ ttc = tt->ttc_array;
+ for (i = 0; i < RUNNING_TASKS(); i++, ttc++)
+ {
+ ttc->tc->tgid_task_context_index = i;
+ }
+ } else
+ return;
+}
+
+void
+free_ttc_array(void)
+{
+ if (VALID_MEMBER(mm_struct_rss))
+ return;
+ if (VALID_MEMBER(task_struct_rss_stat) && (tt->ttc_array != NULL))
+ {
+ free(tt->ttc_array);
+ }
+ tt->ttc_array = NULL;
+}
+
+/*
+ * Cache the MM_FILEPAGES and MM_ANONPAGES enumerator values
+ * (filepages and anonpages) in the task table.
+ */
+void
+set_pages(void)
+{
+ long anonpages, filepages;
+ if (VALID_MEMBER(mm_struct_rss))
+ return;
+ if (VALID_MEMBER(mm_struct_rss_stat))
+ {
+ if (!enumerator_value("MM_FILEPAGES", &filepages) ||
+ !enumerator_value("MM_ANONPAGES", &anonpages))
+ {
+ filepages = 0;
+ anonpages = 1;
+ }
+ tt->filepages = filepages;
+ tt->anonpages = anonpages;
+ } else {
+ return;
+ }
+}
+
static void
show_ps(ulong flag, struct psinfo *psi)
{
@@ -3130,6 +3292,7 @@ show_ps(ulong flag, struct psinfo *psi)
int print;
char buf[BUFSIZE];
+ tt->ttc_array = NULL;
if (!(flag & (PS_EXCLUSIVE|PS_NO_HEADER)))
fprintf(fp,
" PID PPID CPU %s ST %%MEM VSZ RSS COMM\n",
@@ -3154,10 +3317,24 @@ show_ps(ulong flag, struct psinfo *psi)
return;
}
+ set_pages();
+ set_ttc_array();
+ hq_open();
+ tc = FIRST_CONTEXT();
+ for (i = 0; i < RUNNING_TASKS(); i++, tc++)
+ {
+ if (!hq_enter(tc->task))
+ {
+ error(FATAL, "cannot hq_enter: duplicate entry found\n");
+ return;
+ }
+ }
+
tc = FIRST_CONTEXT();
for (i = 0; i < RUNNING_TASKS(); i++, tc++)
show_ps_data(flag, tc, NULL);
-
+ hq_close();
+ free_ttc_array();
return;
}
@@ -4126,16 +4303,23 @@ pid_to_task(ulong pid)
ulong
task_to_pid(ulong task)
{
- int i;
- struct task_context *tc;
+ char *tp;
+ pid_t *pid_addr;
+ int value;
-
- tc = FIRST_CONTEXT();
- for (i = 0; i < RUNNING_TASKS(); i++, tc++)
- if (tc->task == task)
- return(tc->pid);
-
- return(NO_PID);
+ value = hq_entry_exists(task);
+ if (value)
+ {
+ if ((tp = fill_task_struct(task)))
+ {
+ pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid));
+ return (ulong)(*pid_addr);
+ } else
+ error(WARNING, "get task tp: %lx failed\n",task);
+ } else
+ error(WARNING, "task does not exit:%lx\n",task);
+
+ return(NO_PID);
}
/*
@@ -6935,6 +7119,54 @@ is_kernel_thread(ulong task)
return FALSE;
}
+/*
+ * Determine whether a task is a kernel thread. This works like
+ * is_kernel_thread(), but takes a task_context pointer instead.
+ */
+int
+is_kernel_thread_tc(struct task_context *tc)
+{
+ ulong task;
+
+ task = tc->task;
+
+ if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name))
+ return TRUE;
+
+ if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED)
+ initialize_task_state();
+
+ if (IS_ZOMBIE(task) || IS_EXITING(task))
+ return FALSE;
+
+ /*
+ * Check for shifting sands on a live system.
+ */
+
+ if (NOT_SET == tc->task_mm)
+ tc->task_mm = task_mm(task, TRUE);
+
+ if (ACTIVE() && (tc->task_mm != tc->mm_struct))
+ return FALSE;
+
+ /*
+ * Later version Linux kernel threads have no mm_struct at all.
+ * Earlier version kernel threads point to common init_mm.
+ */
+ if (!tc->mm_struct) {
+ if (IS_EXITING(task))
+ return FALSE;
+
+ if (!task_state(task) && !task_flags(task))
+ return FALSE;
+
+ return TRUE;
+
+ } else if (tc->mm_struct == symbol_value("init_mm"))
+ return TRUE;
+
+ return FALSE;
+}
/*
* Gather an arry of pointers to the per-cpu idle tasks. The tasklist
--
1.7.1
--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/crash-utility