Add bpf_ksym_add/del functions as locked versions of __bpf_ksym_add/del.
They will be used in following patches for bpf_trampoline and
bpf_dispatcher.

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
 include/linux/bpf.h |  3 +++
 kernel/bpf/core.c   | 14 ++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5d6649cdc3df..76934893bccf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -573,6 +573,9 @@ struct bpf_image {
 #define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
 bool is_bpf_image_address(unsigned long address);
 void *bpf_image_alloc(void);
+/* Called only from code, so there's no need for stubs. */
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
 #else
 static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f361cfdf8d9e..739bef60d868 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -655,6 +655,13 @@ static void __bpf_ksym_add(struct bpf_ksym *ksym)
 	latch_tree_insert(&ksym->tnode, &bpf_ksym_tree, &bpf_ksym_tree_ops);
 }
 
+void bpf_ksym_add(struct bpf_ksym *ksym)
+{
+	spin_lock_bh(&bpf_lock);
+	__bpf_ksym_add(ksym);
+	spin_unlock_bh(&bpf_lock);
+}
+
 static void __bpf_ksym_del(struct bpf_ksym *ksym)
 {
 	if (list_empty(&ksym->lnode))
@@ -664,6 +671,13 @@ static void __bpf_ksym_del(struct bpf_ksym *ksym)
 	list_del_rcu(&ksym->lnode);
 }
 
+void bpf_ksym_del(struct bpf_ksym *ksym)
+{
+	spin_lock_bh(&bpf_lock);
+	__bpf_ksym_del(ksym);
+	spin_unlock_bh(&bpf_lock);
+}
+
 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 {
 	return fp->jited && !bpf_prog_was_classic(fp);
-- 
2.24.1
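
For context, the new exported wrappers would be called from the trampoline
and dispatcher image registration paths added later in the series. Below is
a minimal sketch of such a caller, not the actual follow-up patch: the
image_ksym_register() name, the id parameter and the "bpf_trampoline_%u"
symbol naming are made up for illustration, and it assumes struct bpf_ksym
carries the start/end/name fields used elsewhere in this series.

	/* Hypothetical caller; assumes <linux/bpf.h> and <linux/kallsyms.h>. */
	static void image_ksym_register(void *image, unsigned int size,
					u32 id, struct bpf_ksym *ksym)
	{
		/* Describe the JITed image range that kallsyms should report. */
		ksym->start = (unsigned long) image;
		ksym->end = ksym->start + size;
		snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%u", id);

		/* Take bpf_lock and insert into the ksym list and latch tree. */
		bpf_ksym_add(ksym);
	}

The point of the locked wrappers is that bpf_lock stays private to
kernel/bpf/core.c, so callers like the above get a single entry point that
serializes updates to the kallsyms list and latch tree.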