On Sat, Feb 8, 2020 at 7:43 AM Jiri Olsa <jolsa@xxxxxxxxxx> wrote:
>
> The bpf_tree is used both for kallsyms iterations and for searching
> exception tables, but the exception table search is needed only for
> bpf programs.
>
> Adding bpf_kallsyms_tree that will hold symbols for all bpf_prog,
> bpf_trampoline and bpf_dispatcher objects, and keeping bpf_tree
> only for the bpf_prog exception table search, to keep that fast.
>
> Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
> ---
>  include/linux/bpf.h |  1 +
>  kernel/bpf/core.c   | 60 ++++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index da67ca3afa2f..151d7b1c8435 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -468,6 +468,7 @@ struct bpf_ksym {
>         unsigned long end;
>         char name[KSYM_NAME_LEN];
>         struct list_head lnode;
> +       struct latch_tree_node tnode;
>  };
>
>  enum bpf_tramp_prog_type {
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index b9b7077e60f3..1daa72341450 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -606,8 +606,46 @@ static const struct latch_tree_ops bpf_tree_ops = {
>         .comp   = bpf_tree_comp,
>  };
>
> +static __always_inline unsigned long
> +bpf_get_ksym_start(struct latch_tree_node *n)

I thought static functions are never marked as inline in kernel
sources. Are there some special cases when it's ok/necessary?

> +{
> +       const struct bpf_ksym *ksym;
> +
> +       ksym = container_of(n, struct bpf_ksym, tnode);
> +       return ksym->start;
> +}
> +
> +static __always_inline bool
> +bpf_ksym_tree_less(struct latch_tree_node *a,
> +                  struct latch_tree_node *b)
> +{
> +       return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
> +}
> +
> +static __always_inline int
> +bpf_ksym_tree_comp(void *key, struct latch_tree_node *n)
> +{
> +       unsigned long val = (unsigned long)key;
> +       const struct bpf_ksym *ksym;
> +
> +       ksym = container_of(n, struct bpf_ksym, tnode);
> +
> +       if (val < ksym->start)
> +               return -1;
> +       if (val >= ksym->end)
> +               return 1;
> +
> +       return 0;
> +}
> +
> +static const struct latch_tree_ops bpf_kallsyms_tree_ops = {

Given all the helper functions use bpf_ksym_tree and bpf_ksym
(bpf_ksym_find) prefixes, call this bpf_ksym_tree_ops?

> +       .less   = bpf_ksym_tree_less,
> +       .comp   = bpf_ksym_tree_comp,
> +};
> +
>  static DEFINE_SPINLOCK(bpf_lock);
>  static LIST_HEAD(bpf_kallsyms);
> +static struct latch_tree_root bpf_kallsyms_tree __cacheline_aligned;

same as above, bpf_ksym_tree for consistency?

>  static struct latch_tree_root bpf_tree __cacheline_aligned;
>
>  static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
> @@ -615,6 +653,7 @@ static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
>         WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
>         list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
>         latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
> +       latch_tree_insert(&aux->ksym.tnode, &bpf_kallsyms_tree, &bpf_kallsyms_tree_ops);
>  }
>
[...]
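
For reference, here is a minimal sketch (not part of the quoted patch,
which is trimmed above) of how a lookup on the proposed tree could
look, using the latch_tree_find() helper from
include/linux/rbtree_latch.h. The bpf_ksym_tree and bpf_ksym_tree_ops
names follow the renames suggested in the review, and bpf_ksym_find is
only the hypothetical helper name the review comment mentions:

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
        struct latch_tree_node *n;

        /*
         * The caller is expected to hold the RCU read lock;
         * latch_tree_find() walks one of the two latched tree copies
         * without taking locks, which is what makes this lookup safe
         * from NMI/perf context.
         */
        n = latch_tree_find((void *)addr, &bpf_ksym_tree,
                            &bpf_ksym_tree_ops);

        /* comp() returned 0, i.e. ksym->start <= addr < ksym->end */
        return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

The comparator returning 0 for the [start, end) range is what turns
the address-to-symbol query into a single O(log n) tree walk instead
of a linear scan of the bpf_kallsyms list.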