On 01/08/2023 08:30, Jiri Olsa wrote:
> Adding support for the bpf_get_func_ip helper for uprobe programs to
> return the probed address for both uprobe and return uprobe.
>
> We discussed this in [1] and agreed that uprobe can have a special use
> of the bpf_get_func_ip helper that differs from kprobe.
>
> The kprobe bpf_get_func_ip returns:
>   - address of the function if the probe is attached on function entry,
>     for both kprobe and return kprobe
>   - 0 if the probe is not attached on function entry
>
> The uprobe bpf_get_func_ip returns:
>   - address of the probe for both uprobe and return uprobe
>
> The reason for this semantic change is that the kernel can't really tell
> if the probed user space address is a function entry.
>
> The uprobe program is actually a kprobe type program attached as uprobe.
> One of the consequences of this design is that uprobes do not have their
> own set of helpers, but share them with kprobes.
>
> As we need different functionality for the bpf_get_func_ip helper for
> uprobe, I'm adding a bool value to bpf_trace_run_ctx, so the helper can
> detect that it's executed in uprobe context and call specific code.
>
> The is_uprobe bool is set to true in bpf_prog_run_array_sleepable, which
> is currently used only for executing bpf programs for uprobes.
>
> Suggested-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> [1] https://lore.kernel.org/bpf/CAEf4BzZ=xLVkG5eurEuvLU79wAMtwho7ReR+XJAgwhFF4M-7Cg@xxxxxxxxxxxxxx/
> Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
> ---
>  include/linux/bpf.h            |  5 +++++
>  include/uapi/linux/bpf.h       |  7 ++++++-
>  kernel/trace/bpf_trace.c       | 21 ++++++++++++++++++++-
>  kernel/trace/trace_probe.h     |  5 +++++
>  kernel/trace/trace_uprobe.c    |  5 -----
>  tools/include/uapi/linux/bpf.h |  7 ++++++-
>  6 files changed, 42 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index ceaa8c23287f..8ea071383ef1 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1819,6 +1819,7 @@ struct bpf_cg_run_ctx {
>  struct bpf_trace_run_ctx {
>  	struct bpf_run_ctx run_ctx;
>  	u64 bpf_cookie;
> +	bool is_uprobe;
>  };
>
>  struct bpf_tramp_run_ctx {
> @@ -1867,6 +1868,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
>  	if (unlikely(!array))
>  		return ret;
>
> +	run_ctx.is_uprobe = false;
> +
>  	migrate_disable();
>  	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>  	item = &array->items[0];
> @@ -1906,6 +1909,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
>  	rcu_read_lock_trace();
>  	migrate_disable();
>
> +	run_ctx.is_uprobe = true;
> +
>  	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
>  	if (unlikely(!array))
>  		goto out;
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 70da85200695..d21deb46f49f 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -5086,9 +5086,14 @@ union bpf_attr {
>   * u64 bpf_get_func_ip(void *ctx)
>   * 	Description
>   * 		Get address of the traced function (for tracing and kprobe programs).
> + *
> + *		When called for kprobe program attached as uprobe it returns
> + *		probe address for both entry and return uprobe.
> + *
>   * 	Return
> - * 		Address of the traced function.
> + *		Address of the traced function for kprobe.
>   * 		0 for kprobes placed within the function (not at the entry).
> + *		Address of the probe for uprobe and return uprobe.
>   *
>   * u64 bpf_get_attach_cookie(void *ctx)
>   * 	Description
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index c92eb8c6ff08..7930a91ca7f3 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1057,9 +1057,28 @@ static unsigned long get_entry_ip(unsigned long fentry_ip)
>  #define get_entry_ip(fentry_ip) fentry_ip
>  #endif
>
> +#ifdef CONFIG_UPROBES
> +static unsigned long bpf_get_func_ip_uprobe(struct pt_regs *regs)
> +{
> +	struct uprobe_dispatch_data *udd;
> +
> +	udd = (struct uprobe_dispatch_data *) current->utask->vaddr;
> +	return udd->bp_addr;
> +}
> +#else
> +#define bpf_get_func_ip_uprobe(regs) (u64) -1

Small thing - I don't think this value is exposed to users due to the
run_ctx->is_uprobe predicate, but would it be worth using -EOPNOTSUPP
here maybe?

> +#endif
> +
>  BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
>  {
> -	struct kprobe *kp = kprobe_running();
> +	struct bpf_trace_run_ctx *run_ctx;
> +	struct kprobe *kp;
> +
> +	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
> +	if (run_ctx->is_uprobe)
> +		return bpf_get_func_ip_uprobe(regs);
> +
> +	kp = kprobe_running();
>
>  	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
>  		return 0;
> diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
> index 01ea148723de..7dde806be91e 100644
> --- a/kernel/trace/trace_probe.h
> +++ b/kernel/trace/trace_probe.h
> @@ -519,3 +519,8 @@ void __trace_probe_log_err(int offset, int err);
>
>  #define trace_probe_log_err(offs, err)	\
>  	__trace_probe_log_err(offs, TP_ERR_##err)
> +
> +struct uprobe_dispatch_data {
> +	struct trace_uprobe	*tu;
> +	unsigned long		bp_addr;
> +};
> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> index 555c223c3232..fc76c3985672 100644
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -88,11 +88,6 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
>  static int register_uprobe_event(struct trace_uprobe *tu);
>  static int unregister_uprobe_event(struct trace_uprobe *tu);
>
> -struct uprobe_dispatch_data {
> -	struct trace_uprobe	*tu;
> -	unsigned long		bp_addr;
> -};
> -
>  static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
>  static int uretprobe_dispatcher(struct uprobe_consumer *con,
>  				unsigned long func, struct pt_regs *regs);
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 70da85200695..d21deb46f49f 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -5086,9 +5086,14 @@ union bpf_attr {
>   * u64 bpf_get_func_ip(void *ctx)
>   * 	Description
>   * 		Get address of the traced function (for tracing and kprobe programs).
> + *
> + *		When called for kprobe program attached as uprobe it returns
> + *		probe address for both entry and return uprobe.
> + *
>   * 	Return
> - * 		Address of the traced function.
> + *		Address of the traced function for kprobe.
>   * 		0 for kprobes placed within the function (not at the entry).
> + *		Address of the probe for uprobe and return uprobe.
>   *
>   * u64 bpf_get_attach_cookie(void *ctx)
>   * 	Description
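
Also, purely to illustrate the new semantics from the consumer side, here
is a minimal, untested sketch of a uprobe/uretprobe program pair calling
bpf_get_func_ip(). The binary path, symbol and program names below are
made up, and the SEC()/BPF_UPROBE macros assume a recent libbpf:

  /* uprobe_func_ip.bpf.c - illustrative only */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  /* "/usr/bin/foo" and "bar" are placeholders for a real binary:symbol. */
  SEC("uprobe//usr/bin/foo:bar")
  int BPF_UPROBE(bar_entry)
  {
  	/* With this patch: returns the probed user space address. */
  	bpf_printk("uprobe ip: %lx", bpf_get_func_ip(ctx));
  	return 0;
  }

  SEC("uretprobe//usr/bin/foo:bar")
  int BPF_URETPROBE(bar_exit)
  {
  	/* Same address as the entry probe, rather than 0. */
  	bpf_printk("uretprobe ip: %lx", bpf_get_func_ip(ctx));
  	return 0;
  }

If I read the diff right, both programs now print the same probed address,
whereas before the helper fell through to the kprobe_running() path and
returned 0 in uprobe context.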