This patch adds an active references hash table and a free references
list to struct bpf_run_ctx. The active reference hash table stores
active reference nodes that record the references currently held by
the bpf program. The free reference list stores free reference nodes.

At the beginning all reference nodes are free. During initialization,
max_acquired_refs reference nodes are allocated to record information
about the references held by the bpf program. A reference node records
information including the object btf id and the object memory address.

The bpf context is initialized through init_bpf_context before the bpf
program runs, and cleared through clear_bpf_context after the bpf
program ends. Allocation failures in init_bpf_context are propagated
to the caller as -ENOMEM, and teardown uses the _safe iteration
variants since nodes are freed while walking the containers.

Currently only used to demonstrate the idea, so only applied to the
syscall program type (only added to bpf_prog_test_run_syscall).

Signed-off-by: Juntong Deng <juntong.deng@xxxxxxxxxxx>
---
 include/linux/bpf.h |  6 +++++-
 include/linux/btf.h |  7 +++++++
 net/bpf/test_run.c  | 49 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3ccc20f936b2..1bc90d805872 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -11,6 +11,7 @@
 #include <linux/file.h>
 #include <linux/percpu.h>
 #include <linux/err.h>
+#include <linux/hashtable.h>
 #include <linux/rbtree_latch.h>
 #include <linux/numa.h>
 #include <linux/mm_types.h>
@@ -2109,7 +2110,10 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
 			u64 bpf_cookie,
 			struct bpf_prog_array **new_array);
 
-struct bpf_run_ctx {};
+struct bpf_run_ctx {
+	DECLARE_HASHTABLE(active_ref_list, 5);
+	struct list_head free_ref_list;
+};
 
 struct bpf_cg_run_ctx {
 	struct bpf_run_ctx run_ctx;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index ebc0c0c9b944..2bd7fc996756 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -139,6 +139,13 @@ struct btf_struct_metas {
 	struct btf_struct_meta types[];
 };
 
+struct bpf_ref_node {
+	struct hlist_node hnode;
+	struct list_head lnode;
+	u32 struct_btf_id;
+	unsigned long obj_addr;
+};
+
 extern const struct file_operations btf_fops;
 
 const char *btf_get_name(const struct btf *btf);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 8f6f7db48d4e..13d0994883c0 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1530,12 +1530,52 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 	return ret;
 }
 
+static void clear_bpf_context(struct bpf_run_ctx *ctx)
+{
+	struct bpf_ref_node *node, *tmp;
+	struct hlist_node *htmp;
+	int bkt;
+
+	/* Entries are deleted while iterating: use the _safe variants. */
+	hash_for_each_safe(ctx->active_ref_list, bkt, htmp, node, hnode) {
+		hash_del(&node->hnode);
+		kfree(node);
+	}
+
+	list_for_each_entry_safe(node, tmp, &ctx->free_ref_list, lnode) {
+		list_del(&node->lnode);
+		kfree(node);
+	}
+}
+
+/* Preallocate max_acquired_refs free reference nodes. Returns 0 or -ENOMEM. */
+static int init_bpf_context(struct bpf_run_ctx *ctx, struct bpf_prog *prog)
+{
+	struct bpf_ref_node *node;
+	int i;
+
+	hash_init(ctx->active_ref_list);
+	INIT_LIST_HEAD(&ctx->free_ref_list);
+
+	for (i = 0; i < prog->max_acquired_refs; i++) {
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		if (!node) {
+			clear_bpf_context(ctx);
+			return -ENOMEM;
+		}
+		list_add(&node->lnode, &ctx->free_ref_list);
+	}
+
+	return 0;
+}
+
 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 			      const union bpf_attr *kattr,
 			      union bpf_attr __user *uattr)
 {
 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
 	__u32 ctx_size_in = kattr->test.ctx_size_in;
+	struct bpf_run_ctx *old_ctx, run_ctx;
 	void *ctx = NULL;
 	u32 retval;
 	int err = 0;
@@ -1557,10 +1597,19 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 		return PTR_ERR(ctx);
 	}
 
+	err = init_bpf_context(&run_ctx, prog);
+	if (err)
+		goto out;
+
+	old_ctx = bpf_set_run_ctx(&run_ctx);
+
 	rcu_read_lock_trace();
 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
 	rcu_read_unlock_trace();
 
+	bpf_reset_run_ctx(old_ctx);
+	clear_bpf_context(&run_ctx);
+
 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
 		err = -EFAULT;
 		goto out;
-- 
2.39.5