On Tue, Feb 13, 2024 at 10:37 PM Ian Rogers <irogers@xxxxxxxxxx> wrote:
>
> Commit 91e467bc568f ("perf machine: Use hashtable for machine
> threads") made the iteration of thread tids unordered. The perf trace
> --summary output sorts and prints each hash bucket, rather than all
> threads globally. Change this behavior by turning all threads into a
> list, sorting the list by number of trace events and then by tid, and
> finally printing the list. This also allows the rbtree in threads to
> not be accessed outside of machine.
>
> Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
> ---
>  tools/perf/builtin-trace.c  | 41 +++++++++++++++++++++----------------
>  tools/perf/util/rb_resort.h |  5 -----
>  2 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
> index 109b8e64fe69..90eaff8c0f6e 100644
> --- a/tools/perf/builtin-trace.c
> +++ b/tools/perf/builtin-trace.c
> @@ -74,6 +74,7 @@
>  #include <linux/err.h>
>  #include <linux/filter.h>
>  #include <linux/kernel.h>
> +#include <linux/list_sort.h>
>  #include <linux/random.h>
>  #include <linux/stringify.h>
>  #include <linux/time64.h>
> @@ -4312,34 +4313,38 @@ static unsigned long thread__nr_events(struct thread_trace *ttrace)
>  	return ttrace ? ttrace->nr_events : 0;
>  }
>
> -DEFINE_RESORT_RB(threads,
> -		(thread__nr_events(thread__priv(a->thread)) <
> -		 thread__nr_events(thread__priv(b->thread))),
> -	struct thread *thread;
> -)
> +static int trace_nr_events_cmp(void *priv __maybe_unused,
> +			       const struct list_head *la,
> +			       const struct list_head *lb)
>  {
> -	entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
> +	struct thread_list *a = list_entry(la, struct thread_list, list);
> +	struct thread_list *b = list_entry(lb, struct thread_list, list);
> +	unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
> +	unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));
> +
> +	if (a_nr_events != b_nr_events)
> +		return a_nr_events < b_nr_events ? -1 : 1;
> +
> +	/* Identical number of threads, place smaller tids first. */
> +	return thread__tid(a->thread) < thread__tid(b->thread)
> +		? -1
> +		: (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0);

I'm not sure if there can be a case where two different threads in the
hash table have the same tid.  If not, it can simplify the last case
(see the P.S. at the bottom of this mail for a sketch).

>  }
>
>  static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
>  {
>  	size_t printed = trace__fprintf_threads_header(fp);
> -	struct rb_node *nd;
> -	int i;
> -
> -	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
> -		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
> +	LIST_HEAD(threads);
>
> -		if (threads == NULL) {
> -			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
> -			return 0;
> -		}
> +	if (machine__thread_list(trace->host, &threads) == 0) {
> +		struct thread_list *pos;
>
> -		resort_rb__for_each_entry(nd, threads)
> -			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
> +		list_sort(NULL, &threads, trace_nr_events_cmp);

Same concern, it'd be nice if we could use an array instead.
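To make that concrete, here is a rough, completely untested sketch of
an array-based version.  It still relies on machine__thread_list()
from this patch to collect the threads, but it flattens the list into
an array and sorts it with plain qsort(); the helper name
thread_nr_events_cmp and the error handling are only illustrative:

static int thread_nr_events_cmp(const void *va, const void *vb)
{
	struct thread *a = *(struct thread * const *)va;
	struct thread *b = *(struct thread * const *)vb;
	unsigned long a_nr = thread__nr_events(thread__priv(a));
	unsigned long b_nr = thread__nr_events(thread__priv(b));

	if (a_nr != b_nr)
		return a_nr < b_nr ? -1 : 1;
	/* Tie-break on tid so the summary order is reproducible. */
	return thread__tid(a) < thread__tid(b) ? -1 :
	       (thread__tid(a) > thread__tid(b) ? 1 : 0);
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	LIST_HEAD(threads);
	struct thread_list *pos;
	struct thread **entries;
	size_t i = 0, nr = 0;

	if (machine__thread_list(trace->host, &threads) != 0)
		goto out;

	/* Count the entries, then flatten the list into an array. */
	list_for_each_entry(pos, &threads, list)
		nr++;

	entries = calloc(nr, sizeof(*entries));
	if (entries == NULL)
		goto out;

	list_for_each_entry(pos, &threads, list)
		entries[i++] = pos->thread;

	qsort(entries, nr, sizeof(*entries), thread_nr_events_cmp);

	for (i = 0; i < nr; i++)
		printed += trace__fprintf_thread(fp, entries[i], trace);

	free(entries);
out:
	thread_list__delete(&threads);
	return printed;
}

That would also let us drop the new include of linux/list_sort.h, at
the cost of one extra allocation.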
Thanks,
Namhyung


>
> -		resort_rb__delete(threads);
> +		list_for_each_entry(pos, &threads, list)
> +			printed += trace__fprintf_thread(fp, pos->thread, trace);
>  	}
> +	thread_list__delete(&threads);
>  	return printed;
>  }
>
> diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
> index 376e86cb4c3c..d927a0d25052 100644
> --- a/tools/perf/util/rb_resort.h
> +++ b/tools/perf/util/rb_resort.h
> @@ -143,9 +143,4 @@ struct __name##_sorted *__name = __name##_sorted__new
>  	DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root,		\
>  				  __ilist->rblist.nr_entries)
>
> -/* For 'struct machine->threads' */
> -#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket)	\
> -	DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
> -				  __machine->threads[hash_bucket].nr)
> -
>  #endif /* _PERF_RESORT_RB_H_ */
> --
> 2.43.0.687.g38aa6559b0-goog
>
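P.S. To spell out the simplification I had in mind above: if we can
assume that no two entries returned by machine__thread_list() share a
tid (I have not verified that), the final comparison in
trace_nr_events_cmp() could shrink to something like:

	/* nr_events are equal; tids are assumed unique, so no == case. */
	return thread__tid(a->thread) < thread__tid(b->thread) ? -1 : 1;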