On Fri, Dec 15, 2023 at 12:11 PM Dmitrii Dolgov <9erthalion6@xxxxxxxxx> wrote:
[...]
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index eb447b0a9423..e7393674ab94 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1414,6 +1414,7 @@ struct bpf_prog_aux {
>          bool dev_bound; /* Program is bound to the netdev. */
>          bool offload_requested; /* Program is bound and offloaded to the netdev. */
>          bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
> +        bool attach_tracing_prog; /* true if tracing another tracing program */
>          bool func_proto_unreliable;
>          bool sleepable;
>          bool tail_call_reachable;
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 5e43ddd1b83f..bcc5d5ab0870 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -3040,8 +3040,10 @@ static void bpf_tracing_link_release(struct bpf_link *link)
>          bpf_trampoline_put(tr_link->trampoline);
>
>          /* tgt_prog is NULL if target is a kernel function */
> -        if (tr_link->tgt_prog)
> +        if (tr_link->tgt_prog) {
>                  bpf_prog_put(tr_link->tgt_prog);
> +                link->prog->aux->attach_tracing_prog = false;
> +        }
>  }
>
>  static void bpf_tracing_link_dealloc(struct bpf_link *link)
> @@ -3243,6 +3245,12 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
>                  goto out_unlock;
>          }
>
> +        /* Bookkeeping for managing the prog attachment chain */
> +        if (tgt_prog &&
> +            prog->type == BPF_PROG_TYPE_TRACING &&
> +            tgt_prog->type == BPF_PROG_TYPE_TRACING)
> +                prog->aux->attach_tracing_prog = true;
> +
>          link->tgt_prog = tgt_prog;
>          link->trampoline = tr;
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 8e7b6072e3f4..f8c15ce8fd05 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -20077,6 +20077,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>                              struct bpf_attach_target_info *tgt_info)
>  {
>          bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
> +        bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
>          const char prefix[] = "btf_trace_";
>          int ret = 0, subprog = -1, i;
>          const struct btf_type *t;
> @@ -20147,10 +20148,21 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>                          bpf_log(log, "Can attach to only JITed progs\n");
>                          return -EINVAL;
>                  }
> -                if (tgt_prog->type == prog->type) {
> -                        /* Cannot fentry/fexit another fentry/fexit program.
> -                         * Cannot attach program extension to another extension.
> -                         * It's ok to attach fentry/fexit to extension program.
> +                if (prog_tracing) {
> +                        if (aux->attach_tracing_prog) {
> +                                /*
> +                                 * Target program is an fentry/fexit which is already attached
> +                                 * to another tracing program. More levels of nesting
> +                                 * attachment are not allowed.
> +                                 */
> +                                bpf_log(log, "Cannot nest tracing program attach more than once\n");
> +                                return -EINVAL;
> +                        }

If we add

+                        prog->aux->attach_tracing_prog = true;

here, we don't need the changes in syscall.c, right? IOW, we set
attach_tracing_prog at program load time, not attach time. Would this
work? (Rough sketch of that placement below, after the quoted context.)

Thanks,
Song

> +                } else if (tgt_prog->type == prog->type) {
> +                        /*
> +                         * To avoid potential call chain cycles, prevent attaching of a
> +                         * program extension to another extension. It's ok to attach
> +                         * fentry/fexit to extension program.
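
To make the suggestion above concrete, here is a rough, untested sketch of the
verifier hunk with the flag set inside bpf_check_attach_target() rather than in
syscall.c. The identifiers are the ones from the quoted patch; the added comment
and the exact placement are only illustrative:

                if (prog_tracing) {
                        if (aux->attach_tracing_prog) {
                                /*
                                 * Target program is an fentry/fexit which is already attached
                                 * to another tracing program. More levels of nesting
                                 * attachment are not allowed.
                                 */
                                bpf_log(log, "Cannot nest tracing program attach more than once\n");
                                return -EINVAL;
                        }
                        /*
                         * Mark the program as tracing a tracing program here, at
                         * check time, instead of doing the bookkeeping in
                         * bpf_tracing_prog_attach() / bpf_tracing_link_release().
                         */
                        prog->aux->attach_tracing_prog = true;
                } else if (tgt_prog->type == prog->type) {
                        /* ... unchanged ... */

If that works, both the bpf_tracing_prog_attach() hunk that sets the flag and
the bpf_tracing_link_release() hunk that clears it could presumably be dropped,
since the flag would then describe the program itself rather than the lifetime
of a particular link.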