Rename bpf_tree to bpf_progs_tree and bpf_tree_ops to bpf_progs_tree_ops
to better capture the usage of the tree, which holds bpf_prog objects
and is used only for the exception tables search.

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
 kernel/bpf/core.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 66b17bea286e..50af5dcf7ff9 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -580,13 +580,14 @@ bpf_get_prog_addr_start(struct latch_tree_node *n)
 	return aux->ksym.start;
 }
 
-static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
-					  struct latch_tree_node *b)
+static __always_inline bool
+bpf_progs_tree_less(struct latch_tree_node *a,
+		    struct latch_tree_node *b)
 {
 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
 }
 
-static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+static __always_inline int bpf_progs_tree_comp(void *key, struct latch_tree_node *n)
 {
 	unsigned long val = (unsigned long)key;
 	const struct bpf_prog_aux *aux;
@@ -601,9 +602,9 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 	return 0;
 }
 
-static const struct latch_tree_ops bpf_tree_ops = {
-	.less = bpf_tree_less,
-	.comp = bpf_tree_comp,
+static const struct latch_tree_ops bpf_progs_tree_ops = {
+	.less = bpf_progs_tree_less,
+	.comp = bpf_progs_tree_comp,
 };
 
 static __always_inline unsigned long
@@ -646,7 +647,7 @@ static const struct latch_tree_ops bpf_kallsyms_tree_ops = {
 static DEFINE_SPINLOCK(bpf_lock);
 static LIST_HEAD(bpf_kallsyms);
 static struct latch_tree_root bpf_kallsyms_tree __cacheline_aligned;
-static struct latch_tree_root bpf_tree __cacheline_aligned;
+static struct latch_tree_root bpf_progs_tree __cacheline_aligned;
 
 static void __bpf_ksym_add(struct bpf_ksym *ksym)
 {
@@ -706,7 +707,8 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 	bpf_get_prog_name(fp);
 
 	spin_lock_bh(&bpf_lock);
-	latch_tree_insert(&fp->aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_insert(&fp->aux->ksym_tnode, &bpf_progs_tree,
+			  &bpf_progs_tree_ops);
 	__bpf_ksym_add(&fp->aux->ksym);
 	spin_unlock_bh(&bpf_lock);
 }
@@ -717,7 +719,8 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 		return;
 
 	spin_lock_bh(&bpf_lock);
-	latch_tree_erase(&fp->aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_erase(&fp->aux->ksym_tnode, &bpf_progs_tree,
+			  &bpf_progs_tree_ops);
 	__bpf_ksym_del(&fp->aux->ksym);
 	spin_unlock_bh(&bpf_lock);
 }
@@ -726,7 +729,8 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 {
 	struct latch_tree_node *n;
 
-	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+	n = latch_tree_find((void *)addr, &bpf_progs_tree,
+			    &bpf_progs_tree_ops);
 	return n ? container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
 		   NULL;
 }
-- 
2.24.1
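
For reviewers' context, below is a rough sketch of the only consumer of
this tree, the exception tables search path mentioned in the changelog.
It is paraphrased from search_bpf_extables() in kernel/bpf/core.c, is
not part of this patch, and details may differ between kernel versions:

	/*
	 * Sketch (not part of this patch): resolve a faulting kernel
	 * address to the BPF program that contains it via the latch
	 * tree lookup, then binary-search that program's own exception
	 * table for a fixup entry.
	 */
	const struct exception_table_entry *
	search_bpf_extables(unsigned long addr)
	{
		const struct exception_table_entry *e = NULL;
		struct bpf_prog *prog;

		rcu_read_lock();
		/* latch tree lookup: addr -> bpf_prog via bpf_progs_tree */
		prog = bpf_prog_kallsyms_find(addr);
		if (!prog || !prog->aux->num_exentries)
			goto out;

		e = search_extable(prog->aux->extable,
				   prog->aux->num_exentries, addr);
	out:
		rcu_read_unlock();
		return e;
	}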