On Thu, Sep 7, 2023 at 12:14 AM Jiri Olsa <jolsa@xxxxxxxxxx> wrote:
>
> Increase the misses stats in case bpf prog_array execution is skipped
> because of the recursion check in trace_call_bpf.
>
> Add bpf_prog_inc_misses_counters, which increases the misses
> count for all bpf programs in a bpf_prog_array.
>
> Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>

Reviewed-and-tested-by: Song Liu <song@xxxxxxxxxx>

> ---
>  include/linux/bpf.h      | 16 ++++++++++++++++
>  kernel/trace/bpf_trace.c |  3 +++
>  2 files changed, 19 insertions(+)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 87eeb3a46a1d..abc18d6f2f2e 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2911,6 +2911,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
>  #endif /* CONFIG_BPF_SYSCALL */
>  #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
>
> +static __always_inline void
> +bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
> +{
> +	const struct bpf_prog_array_item *item;
> +	struct bpf_prog *prog;
> +
> +	if (unlikely(!array))
> +		return;
> +
> +	item = &array->items[0];
> +	while ((prog = READ_ONCE(item->prog))) {
> +		bpf_prog_inc_misses_counter(prog);
> +		item++;
> +	}
> +}
> +
>  #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
>  void bpf_sk_reuseport_detach(struct sock *sk);
>  int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index a9d8634b503c..44f399b19af1 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -117,6 +117,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
>  		 * and don't send kprobe event into ring-buffer,
>  		 * so return zero here
>  		 */
> +		rcu_read_lock();
> +		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
> +		rcu_read_unlock();
>  		ret = 0;
>  		goto out;
>  	}
> --
> 2.41.0
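
As a side note (not part of the patch itself): the counter bumped here is
the same one user space reads back as recursion_misses in struct
bpf_prog_info, e.g. via bpftool prog show. A minimal libbpf sketch for
reading it, assuming an already-loaded program fd (print_recursion_misses
is just an illustrative name):

  #include <stdio.h>
  #include <string.h>
  #include <bpf/bpf.h>

  /* prog_fd: fd of an already-loaded BPF program (assumed to exist).
   * Prints how often the program was skipped by the recursion check.
   */
  static void print_recursion_misses(int prog_fd)
  {
  	struct bpf_prog_info info;
  	__u32 len = sizeof(info);

  	memset(&info, 0, sizeof(info));
  	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
  		return;

  	printf("recursion_misses: %llu\n",
  	       (unsigned long long)info.recursion_misses);
  }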