On Thu, Apr 14, 2022 at 9:12 AM Stanislav Fomichev <sdf@xxxxxxxxxx> wrote:
> +static int
> +bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp,
> +                            enum cgroup_bpf_attach_type atype,
> +                            const void *ctx, bpf_prog_run_fn run_prog,
> +                            int retval, u32 *ret_flags)
> +{
> +       const struct bpf_prog_array_item *item;
> +       const struct bpf_prog *prog;
> +       const struct bpf_prog_array *array;
> +       struct bpf_run_ctx *old_run_ctx;
> +       struct bpf_cg_run_ctx run_ctx;
> +       u32 func_ret;
> +
> +       run_ctx.retval = retval;
> +       migrate_disable();
> +       rcu_read_lock();
> +       array = rcu_dereference(cgrp->effective[atype]);
> +       item = &array->items[0];
> +       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> +       while ((prog = READ_ONCE(item->prog))) {
> +               run_ctx.prog_item = item;
> +               func_ret = run_prog(prog, ctx);

...

> +       ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
>                                     &ctx, bpf_prog_run, retval);

Did you check the asm to confirm that bpf_prog_run actually gets inlined
after being passed as a pointer to a function?
Crossing fingers... I suspect not every compiler can do that :(
De-virtualization optimization used to be tricky.
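
FWIW, the pattern is easy to spot-check outside of the kernel. Here is a
minimal user-space sketch (run_fn, do_run, run_array are made-up names for
the example, not the kernel functions) that makes it easy to eyeball what a
given compiler does with a function passed by pointer:

/* devirt-sketch.c: build with "gcc -O2 -S devirt-sketch.c" or
 * "clang -O2 -S devirt-sketch.c" and check whether the loop ends up
 * with the inlined body of do_run() (or a direct call to it), or with
 * an indirect "call *%reg".
 */
#include <stdio.h>

typedef int (*run_fn)(int);

static int do_run(int x)
{
        return x * 2;
}

/* Analogous to bpf_prog_run_array_cg(): the runner comes in by pointer. */
static int run_array(const int *vals, int n, run_fn fn)
{
        int i, sum = 0;

        for (i = 0; i < n; i++)
                sum += fn(vals[i]);
        return sum;
}

int main(void)
{
        int vals[] = { 1, 2, 3 };

        /* The call through 'fn' is only de-virtualized if the compiler
         * either inlines run_array() here or clones it for the constant
         * &do_run (IPA constant propagation).
         */
        printf("%d\n", run_array(vals, 3, do_run));
        return 0;
}

For the real thing, looking at the disassembly of the object that contains
bpf_prog_run_array_cg (presumably kernel/bpf/cgroup.o) on both gcc and clang
builds, and checking for indirect calls in the getsockopt/setsockopt paths,
would settle it.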