5.4-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Jiri Olsa <jolsa@xxxxxxxxxx>

[ Upstream commit 389799a7a1e86c55f38897e679762efadcc9dedd ]

To speed up cpu to node lookups, add perf_env__numa_node(), which on the
first lookup creates a cpu-indexed array holding the numa node of each
stored cpu.

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
Cc: Alexey Budankov <alexey.budankov@xxxxxxxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: Joe Mario <jmario@xxxxxxxxxx>
Cc: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Cc: Michael Petlan <mpetlan@xxxxxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Link: http://lkml.kernel.org/r/20190904073415.723-3-jolsa@xxxxxxxxxx
Signed-off-by: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Stable-dep-of: 9c51f8788b5d ("perf env: Avoid recursively taking env->bpf_progs.lock")
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 tools/perf/util/env.c | 40 ++++++++++++++++++++++++++++++++++++++++
 tools/perf/util/env.h |  6 ++++++
 2 files changed, 46 insertions(+)

diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index ef64e197bc8d..2d517f377053 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -183,6 +183,7 @@ void perf_env__exit(struct perf_env *env)
 	zfree(&env->sibling_threads);
 	zfree(&env->pmu_mappings);
 	zfree(&env->cpu);
+	zfree(&env->numa_map);
 
 	for (i = 0; i < env->nr_numa_nodes; i++)
 		perf_cpu_map__put(env->numa_nodes[i].map);
@@ -342,3 +343,42 @@ const char *perf_env__arch(struct perf_env *env)
 
 	return normalize_arch(arch_name);
 }
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+	if (!env->nr_numa_map) {
+		struct numa_node *nn;
+		int i, nr = 0;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			nn = &env->numa_nodes[i];
+			nr = max(nr, perf_cpu_map__max(nn->map));
+		}
+
+		nr++;
+
+		/*
+		 * We initialize the numa_map array to prepare
+		 * it for missing cpus, which return node -1
+		 */
+		env->numa_map = malloc(nr * sizeof(int));
+		if (!env->numa_map)
+			return -1;
+
+		for (i = 0; i < nr; i++)
+			env->numa_map[i] = -1;
+
+		env->nr_numa_map = nr;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			int tmp, j;
+
+			nn = &env->numa_nodes[i];
+			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+				env->numa_map[j] = i;
+		}
+	}
+
+	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 37028215d4a5..ceddddace5cc 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -87,6 +87,10 @@ struct perf_env {
 		struct rb_root		btfs;
 		u32			btfs_cnt;
 	} bpf_progs;
+
+	/* For fast cpu to numa node lookup via perf_env__numa_node */
+	int		*numa_map;
+	int		 nr_numa_map;
 };
 
 enum perf_compress_type {
@@ -119,4 +123,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
 							__u32 prog_id);
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
 #endif /* __PERF_ENV_H */
-- 
2.43.0
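
Not part of the patch above: a minimal usage sketch of the new helper,
assuming a struct perf_env already populated from a perf.data header
(for example &session->header.env).  The wrapper name sample__node and
the debug message are illustrative only; the patch itself adds just
perf_env__numa_node().

#include "util/debug.h"
#include "util/env.h"

/*
 * Illustrative sketch (not from this series): map a sample's cpu to its
 * numa node.  The first call to perf_env__numa_node() builds the
 * cpu-indexed cache; subsequent calls are plain array lookups.
 */
static int sample__node(struct perf_env *env, int cpu)
{
	int node = perf_env__numa_node(env, cpu);

	if (node == -1)
		pr_debug("cpu %d is not in the recorded numa topology\n", cpu);

	return node;
}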