Add two kfuncs, bpf_rcu_read_lock() and bpf_rcu_read_unlock(). These two kfuncs can be used for all program types. A new kfunc hook type BTF_KFUNC_HOOK_GENERIC is added, which corresponds to prog type BPF_PROG_TYPE_UNSPEC, indicating that the kfunc is intended to be used by all prog types. The kfunc bpf_rcu_read_lock() is tagged with the new flag KF_RCU_LOCK, and bpf_rcu_read_unlock() with the new flag KF_RCU_UNLOCK. These two new flags are used by the verifier to identify these two helpers. Signed-off-by: Yonghong Song <yhs@xxxxxx> --- include/linux/bpf.h | 3 +++ include/linux/btf.h | 2 ++ kernel/bpf/btf.c | 8 ++++++++ kernel/bpf/helpers.c | 25 ++++++++++++++++++++++++- 4 files changed, 37 insertions(+), 1 deletion(-) For the new kfuncs, I added the KF_RCU_LOCK and KF_RCU_UNLOCK flags so the verifier can recognize a helper as bpf_rcu_read_lock()/bpf_rcu_read_unlock(). This could be a waste of kfunc flag space, since each flag exists only to identify a single helper. Alternatively, we could identify these kfuncs based on their BTF IDs instead of flags. Any suggestions are welcome. 
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5011cb50abf1..b4bbcafd1c9b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2118,6 +2118,9 @@ bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); const struct btf_func_model * bpf_jit_find_kfunc_model(const struct bpf_prog *prog, const struct bpf_insn *insn); +void bpf_rcu_read_lock(void); +void bpf_rcu_read_unlock(void); + struct bpf_core_ctx { struct bpf_verifier_log *log; const struct btf *btf; diff --git a/include/linux/btf.h b/include/linux/btf.h index d80345fa566b..8783ca7e6079 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -51,6 +51,8 @@ #define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */ #define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */ #define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */ +#define KF_RCU_LOCK (1 << 7) /* kfunc does rcu_read_lock() */ +#define KF_RCU_UNLOCK (1 << 8) /* kfunc does rcu_read_unlock() */ /* * Return the name of the passed struct, if exists, or halt the build if for diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index cf16c0ead9f4..d2ee1669a2f3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -199,6 +199,7 @@ DEFINE_IDR(btf_idr); DEFINE_SPINLOCK(btf_idr_lock); enum btf_kfunc_hook { + BTF_KFUNC_HOOK_GENERIC, BTF_KFUNC_HOOK_XDP, BTF_KFUNC_HOOK_TC, BTF_KFUNC_HOOK_STRUCT_OPS, @@ -7498,6 +7499,8 @@ static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) { switch (prog_type) { + case BPF_PROG_TYPE_UNSPEC: + return BTF_KFUNC_HOOK_GENERIC; case BPF_PROG_TYPE_XDP: return BTF_KFUNC_HOOK_XDP; case BPF_PROG_TYPE_SCHED_CLS: @@ -7526,6 +7529,11 @@ u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id) { enum btf_kfunc_hook hook; + u32 *kfunc_flags; + + kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_GENERIC, kfunc_btf_id); + if (kfunc_flags) + return kfunc_flags; hook = 
bpf_prog_type_to_kfunc_hook(prog_type); return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 283f55bbeb70..f364d01e9d93 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1717,9 +1717,32 @@ static const struct btf_kfunc_id_set tracing_kfunc_set = { .set = &tracing_btf_ids, }; +void bpf_rcu_read_lock(void) +{ + rcu_read_lock(); +} + +void bpf_rcu_read_unlock(void) +{ + rcu_read_unlock(); +} + +BTF_SET8_START(generic_btf_ids) +BTF_ID_FLAGS(func, bpf_rcu_read_lock, KF_RCU_LOCK) +BTF_ID_FLAGS(func, bpf_rcu_read_unlock, KF_RCU_UNLOCK) +BTF_SET8_END(generic_btf_ids) + +static const struct btf_kfunc_id_set generic_kfunc_set = { + .owner = THIS_MODULE, + .set = &generic_btf_ids, +}; + static int __init kfunc_init(void) { - return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &tracing_kfunc_set); + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &tracing_kfunc_set); + return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &generic_kfunc_set); } late_initcall(kfunc_init); -- 2.30.2