On Fri, Jun 23, 2023 at 1:19 AM Jiri Olsa <olsajiri@xxxxxxxxx> wrote:
>
> On Thu, Jun 22, 2023 at 05:18:05PM -0700, Andrii Nakryiko wrote:
> > On Tue, Jun 20, 2023 at 1:36 AM Jiri Olsa <jolsa@xxxxxxxxxx> wrote:
> > >
> > > Adding a new multi uprobe link that allows attaching a bpf program
> > > to multiple uprobes.
> > >
> > > Uprobes to attach are specified via the new link_create
> > > 'uprobe_multi' union:
> > >
> > >     struct {
> > >             __u32 flags;
> > >             __u32 cnt;
> > >             __aligned_u64 path;
> > >             __aligned_u64 offsets;
> > >             __aligned_u64 ref_ctr_offsets;
> > >     } uprobe_multi;
> > >
> > > Uprobes are defined for a single binary specified in 'path', with
> > > multiple call sites specified in the 'offsets' array and optional
> > > reference counters in the 'ref_ctr_offsets' array. All specified
> > > arrays have a length of 'cnt'.
> > >
> > > The 'flags' field supports a single bit for now, marking the uprobe
> > > as a return probe.
> > >
> > > Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
> > > ---
> > >  include/linux/trace_events.h   |   6 +
> > >  include/uapi/linux/bpf.h       |  14 ++
> > >  kernel/bpf/syscall.c           |  12 +-
> > >  kernel/trace/bpf_trace.c       | 237 +++++++++++++++++++++++++++++++++
> > >  tools/include/uapi/linux/bpf.h |  14 ++
> > >  5 files changed, 281 insertions(+), 2 deletions(-)
> > >
> >
> > [...]
> >
> > > diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> > > index a75c54b6f8a3..a96e46cd407e 100644
> > > --- a/kernel/bpf/syscall.c
> > > +++ b/kernel/bpf/syscall.c
> > > @@ -3516,6 +3516,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
> > >                  return prog->enforce_expected_attach_type &&
> > >                          prog->expected_attach_type != attach_type ?
> > >                          -EINVAL : 0;
> > > +        case BPF_PROG_TYPE_KPROBE:
> > > +                if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
> > > +                    attach_type != BPF_TRACE_KPROBE_MULTI)
> >
> > should this be UPROBE_MULTI? this looks like your recent bug fix,
> > which has already landed
> >
> > > +                        return -EINVAL;
> > > +                fallthrough;
> >
> > and I replaced this with `return 0;` ;)
>
> ugh, yes, will fix
>
> > >          default:
> > >                  return 0;
> > >          }
> > > @@ -4681,7 +4686,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
> > >                  break;
> > >          case BPF_PROG_TYPE_KPROBE:
> > >                  if (attr->link_create.attach_type != BPF_PERF_EVENT &&
> > > -                    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
> > > +                    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI &&
> > > +                    attr->link_create.attach_type != BPF_TRACE_UPROBE_MULTI) {
> > >                          ret = -EINVAL;
> > >                          goto out;
> > >                  }
> >
> > should this be moved into bpf_prog_attach_check_attach_type() so
> > these checks are unified?
>
> ok, perhaps we could move the whole switch there, will check

+1

> >
> > > @@ -4748,8 +4754,10 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
> > >          case BPF_PROG_TYPE_KPROBE:
> > >                  if (attr->link_create.attach_type == BPF_PERF_EVENT)
> > >                          ret = bpf_perf_link_attach(attr, prog);
> > > -                else
> > > +                else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
> > >                          ret = bpf_kprobe_multi_link_attach(attr, prog);
> > > +                else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
> > > +                        ret = bpf_uprobe_multi_link_attach(attr, prog);
> > >                  break;
> > >          default:
> > >                  ret = -EINVAL;
> >
> > [...]
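As an aside, for anyone following the thread, here's a rough, untested
sketch of how user space would exercise the new link type with the raw
syscall, assuming the uapi additions from this patch
(BPF_TRACE_UPROBE_MULTI, the link_create.uprobe_multi union,
BPF_F_UPROBE_MULTI_RETURN); the target path and offsets are made-up
example values:

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>

  /* attach an already loaded kprobe-type prog (prog_fd) as a
   * multi-uprobe link; returns a link fd or a negative error
   */
  static int uprobe_multi_attach(int prog_fd)
  {
          /* made-up example target with two call site offsets */
          const char *path = "/usr/lib64/libc.so.6";
          __u64 offsets[] = { 0x1000, 0x2000 };
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.link_create.prog_fd = prog_fd;
          attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
          attr.link_create.uprobe_multi.path = (__u64)(unsigned long)path;
          attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
          attr.link_create.uprobe_multi.ref_ctr_offsets = 0; /* optional */
          attr.link_create.uprobe_multi.cnt = 2;
          attr.link_create.uprobe_multi.flags = 0; /* or BPF_F_UPROBE_MULTI_RETURN */

          return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
  }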
> >
> > > +static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
> > > +                                  u32 cnt)
> > > +{
> > > +        u32 i;
> > > +
> > > +        for (i = 0; i < cnt; i++) {
> > > +                uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
> > > +                                  &uprobes[i].consumer);
> > > +        }
> > > +}
> > > +
> > > +static void bpf_uprobe_multi_link_release(struct bpf_link *link)
> > > +{
> > > +        struct bpf_uprobe_multi_link *umulti_link;
> > > +
> > > +        umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
> > > +        bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
> > > +        path_put(&umulti_link->path);
> > > +}
> > > +
> > > +static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
> > > +{
> > > +        struct bpf_uprobe_multi_link *umulti_link;
> > > +
> > > +        umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
> > > +        kvfree(umulti_link->uprobes);
> > > +        kfree(umulti_link);
> > > +}
> > > +
> > > +static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
> > > +        .release = bpf_uprobe_multi_link_release,
> > > +        .dealloc = bpf_uprobe_multi_link_dealloc,
> > > +};
> > > +
> > > +static int uprobe_prog_run(struct bpf_uprobe *uprobe,
> > > +                           unsigned long entry_ip,
> > > +                           struct pt_regs *regs)
> > > +{
> > > +        struct bpf_uprobe_multi_link *link = uprobe->link;
> > > +        struct bpf_uprobe_multi_run_ctx run_ctx = {
> > > +                .entry_ip = entry_ip,
> > > +        };
> > > +        struct bpf_prog *prog = link->link.prog;
> > > +        struct bpf_run_ctx *old_run_ctx;
> > > +        int err = 0;
> > > +
> > > +        might_fault();
> > > +
> > > +        rcu_read_lock_trace();
> >
> > we don't need this if the uprobe is not sleepable, right? why
> > unconditional then?
>
> I won't pretend I understand what rcu_read_lock_trace does ;-)
>
> I tried to follow bpf_prog_run_array_sleepable, where it's called
> unconditionally for both sleepable and non-sleepable progs
>
> there are conditional rcu_read_un/lock calls later on
>
> I will check

hm... Alexei can chime in here, but given that here we are actually
trying to run one BPF program (not an entire array of them), we do know
whether it's going to be sleepable or not. So we can avoid unnecessary
rcu_read_{lock,unlock}_trace() calls.

rcu_read_lock_trace() is used when a sleepable BPF program is going to
be executed, to protect BPF maps and other resources from being freed
too soon. But if we know we don't need sleepable, we can avoid that.

> >
> >
> > > +        migrate_disable();
> > > +
> > > +        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
> > > +                goto out;
> > > +
> > > +        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > > +
> > > +        if (!prog->aux->sleepable)
> > > +                rcu_read_lock();
> > > +
> > > +        err = bpf_prog_run(link->link.prog, regs);
> > > +
> > > +        if (!prog->aux->sleepable)
> > > +                rcu_read_unlock();
> > > +
> > > +        bpf_reset_run_ctx(old_run_ctx);
> > > +
> > > +out:
> > > +        __this_cpu_dec(bpf_prog_active);
> > > +        migrate_enable();
> > > +        rcu_read_unlock_trace();
> > > +        return err;
> > > +}
> > > +
> >
> > [...]
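To make the suggestion concrete, here's an untested sketch of what I
have in mind, keeping the bpf_prog_active accounting and run_ctx
handling from the patch as is:

  static int uprobe_prog_run(struct bpf_uprobe *uprobe,
                             unsigned long entry_ip,
                             struct pt_regs *regs)
  {
          struct bpf_uprobe_multi_link *link = uprobe->link;
          struct bpf_uprobe_multi_run_ctx run_ctx = {
                  .entry_ip = entry_ip,
          };
          struct bpf_prog *prog = link->link.prog;
          bool sleepable = prog->aux->sleepable;
          struct bpf_run_ctx *old_run_ctx;
          int err = 0;

          might_fault();

          /* trace RCU is only needed to protect resources used by
           * sleepable programs; plain RCU is enough otherwise
           */
          if (sleepable)
                  rcu_read_lock_trace();
          else
                  rcu_read_lock();

          migrate_disable();

          if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
                  goto out;

          old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
          err = bpf_prog_run(prog, regs);
          bpf_reset_run_ctx(old_run_ctx);

  out:
          __this_cpu_dec(bpf_prog_active);
          migrate_enable();

          if (sleepable)
                  rcu_read_unlock_trace();
          else
                  rcu_read_unlock();
          return err;
  }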
> >
> > > +
> > > +        err = kern_path(name, LOOKUP_FOLLOW, &path);
> > > +        kfree(name);
> > > +        if (err)
> > > +                return err;
> > > +
> > > +        if (!d_is_reg(path.dentry)) {
> > > +                err = -EINVAL;
> > > +                goto error_path_put;
> > > +        }
> > > +
> > > +        err = -ENOMEM;
> > > +
> > > +        link = kzalloc(sizeof(*link), GFP_KERNEL);
> > > +        uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
> > > +        ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
> >
> > ref_ctr_offsets is optional, but we'll unconditionally allocate this array?
>
> true :-\ will add the uref_ctr_offsets check
>
> >
> > > +
> > > +        if (!uprobes || !ref_ctr_offsets || !link)
> > > +                goto error_free;
> > > +
> > > +        for (i = 0; i < cnt; i++) {
> > > +                if (uref_ctr_offsets && __get_user(ref_ctr_offset, uref_ctr_offsets + i)) {
> > > +                        err = -EFAULT;
> > > +                        goto error_free;
> > > +                }
> > > +                if (__get_user(offset, uoffsets + i)) {
> > > +                        err = -EFAULT;
> > > +                        goto error_free;
> > > +                }
> > > +
> > > +                uprobes[i].offset = offset;
> > > +                uprobes[i].link = link;
> > > +
> > > +                if (flags & BPF_F_UPROBE_MULTI_RETURN)
> > > +                        uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
> > > +                else
> > > +                        uprobes[i].consumer.handler = uprobe_multi_link_handler;
> > > +
> > > +                ref_ctr_offsets[i] = ref_ctr_offset;
> > > +        }
> > > +
> > > +        link->cnt = cnt;
> > > +        link->uprobes = uprobes;
> > > +        link->path = path;
> > > +
> > > +        bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
> > > +                      &bpf_uprobe_multi_link_lops, prog);
> > > +
> > > +        err = bpf_link_prime(&link->link, &link_primer);
> > > +        if (err)
> > > +                goto error_free;
> > > +
> > > +        for (i = 0; i < cnt; i++) {
> > > +                err = uprobe_register_refctr(d_real_inode(link->path.dentry),
> > > +                                             uprobes[i].offset, ref_ctr_offsets[i],
> > > +                                             &uprobes[i].consumer);
> > > +                if (err) {
> > > +                        bpf_uprobe_unregister(&path, uprobes, i);
> >
> > bpf_link_cleanup() will do this through
> > bpf_uprobe_multi_link_release(), no? So you are double unregistering?
> > Either drop cnt to zero, or just don't do this here? The latter is
> > better, IMO.
>
> the bpf_link_cleanup() path won't call the release callback, so we
> have to do that here

bpf_link_cleanup() does fput(primer->file), which eventually calls the
release callback, no? I'd add a printk and simulate the failure just to
be sure.

>
> I think I can add a simple selftest to have this path covered
>
> thanks,
> jirka
>
> >
> > > +                        bpf_link_cleanup(&link_primer);
> > > +                        kvfree(ref_ctr_offsets);
> > > +                        return err;
> > > +                }
> > > +        }
> > > +
> > > +        kvfree(ref_ctr_offsets);
> > > +        return bpf_link_settle(&link_primer);
> > > +
> > > +error_free:
> > > +        kvfree(ref_ctr_offsets);
> > > +        kvfree(uprobes);
> > > +        kfree(link);
> > > +error_path_put:
> > > +        path_put(&path);
> > > +        return err;
> > > +}
> > > +#else /* !CONFIG_UPROBES */
> > > +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
> > > +{
> > > +        return -EOPNOTSUPP;
> > > +}
> >
> > [...]
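Also, going back to the optional ref_ctr_offsets allocation above, an
untested fragment of the uref_ctr_offsets check you mentioned, using
the same variable names as the patch:

          /* allocate the kernel-side copy only when user space passed an array */
          ref_ctr_offsets = NULL;
          if (uref_ctr_offsets) {
                  ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
                  if (!ref_ctr_offsets)
                          goto error_free;
          }

          ...

          /* and later, fall back to 0 (no reference counter) when absent */
          err = uprobe_register_refctr(d_real_inode(link->path.dentry),
                                       uprobes[i].offset,
                                       ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
                                       &uprobes[i].consumer);

The combined `!uprobes || !ref_ctr_offsets || !link` check would then
drop the ref_ctr_offsets part, and since kvfree(NULL) is a no-op, the
error paths can stay as they are.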