Re: [PATCH bpf] bpf: Set run context for rawtp test_run callback

On Mon, Jun 3, 2024 at 4:14 AM Jiri Olsa <jolsa@xxxxxxxxxx> wrote:
>
> syzbot reported a crash when a rawtp program executed through the
> test_run interface calls the bpf_get_attach_cookie helper or any
> other helper that touches the task->bpf_ctx pointer.
>
> We need to set up the bpf_ctx pointer in the rawtp test_run path as
> well, so fix this by moving __bpf_trace_run into a header file and
> using it in the test_run callback.
>
> Also rename __bpf_trace_run to bpf_prog_run_trace.
>
> Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
> Reported-by: syzbot+3ab78ff125b7979e45f9@xxxxxxxxxxxxxxxxxxxxxxxxx
> Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9
> Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
> ---
>  include/linux/bpf.h      | 27 +++++++++++++++++++++++++++
>  kernel/trace/bpf_trace.c | 28 ++--------------------------
>  net/bpf/test_run.c       |  4 +---
>  3 files changed, 30 insertions(+), 29 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 5e694a308081..4eb803b1d308 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2914,6 +2914,33 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
>  }
>  #endif /* CONFIG_BPF_SYSCALL */
>
> +static __always_inline int
> +bpf_prog_run_trace(struct bpf_prog *prog, u64 cookie, u64 *ctx,
> +                  bpf_prog_run_fn run_prog)
> +{
> +       struct bpf_run_ctx *old_run_ctx;
> +       struct bpf_trace_run_ctx run_ctx;
> +       int ret = -1;
> +
> +       cant_sleep();

I suspect you should see a splat from that cant_sleep(), since test_run
calls this from preemptible syscall context.

Overall I think it's better to add an empty run_ctx to
__bpf_prog_test_run_raw_tp() instead of moving such a big function
into a header.

No need for the prog->active increments either. test_run is running
from a syscall, so if the same prog is also attached somewhere it may
recurse once, and that's fine imo.
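
Something like the following rough, untested sketch of that idea (reusing
struct bpf_trace_run_ctx and the bpf_set_run_ctx()/bpf_reset_run_ctx()
helpers already used in the patch above) would keep the change local to
net/bpf/test_run.c:

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;
	struct bpf_trace_run_ctx run_ctx = {};
	struct bpf_run_ctx *old_run_ctx;

	/* Zero-initialized run_ctx means bpf_cookie == 0, and helpers
	 * that read task->bpf_ctx (e.g. bpf_get_attach_cookie()) see a
	 * valid run context instead of crashing.
	 */
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();

	bpf_reset_run_ctx(old_run_ctx);
}

That also avoids both the cant_sleep() assertion and the prog->active
bookkeeping, which only make sense for the real tracepoint path.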

pw-bot: cr

> +       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> +               bpf_prog_inc_misses_counter(prog);
> +               goto out;
> +       }
> +
> +       run_ctx.bpf_cookie = cookie;
> +       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> +
> +       rcu_read_lock();
> +       ret = run_prog(prog, ctx);
> +       rcu_read_unlock();
> +
> +       bpf_reset_run_ctx(old_run_ctx);
> +out:
> +       this_cpu_dec(*(prog->active));
> +       return ret;
> +}
> +
>  static __always_inline int
>  bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
>  {
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index d1daeab1bbc1..8a23ef42b76b 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2383,31 +2383,6 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
>         preempt_enable();
>  }
>
> -static __always_inline
> -void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> -{
> -       struct bpf_prog *prog = link->link.prog;
> -       struct bpf_run_ctx *old_run_ctx;
> -       struct bpf_trace_run_ctx run_ctx;
> -
> -       cant_sleep();
> -       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> -               bpf_prog_inc_misses_counter(prog);
> -               goto out;
> -       }
> -
> -       run_ctx.bpf_cookie = link->cookie;
> -       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> -
> -       rcu_read_lock();
> -       (void) bpf_prog_run(prog, args);
> -       rcu_read_unlock();
> -
> -       bpf_reset_run_ctx(old_run_ctx);
> -out:
> -       this_cpu_dec(*(prog->active));
> -}
> -
>  #define UNPACK(...)                    __VA_ARGS__
>  #define REPEAT_1(FN, DL, X, ...)       FN(X)
>  #define REPEAT_2(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
> @@ -2437,7 +2412,8 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
>         {                                                               \
>                 u64 args[x];                                            \
>                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
> -               __bpf_trace_run(link, args);                            \
> +               (void) bpf_prog_run_trace(link->link.prog, link->cookie,\
> +                                         args, bpf_prog_run);          \
>         }                                                               \
>         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
>  BPF_TRACE_DEFN_x(1);
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index f6aad4ed2ab2..84d1c91b01ab 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -728,9 +728,7 @@ __bpf_prog_test_run_raw_tp(void *data)
>  {
>         struct bpf_raw_tp_test_run_info *info = data;
>
> -       rcu_read_lock();
> -       info->retval = bpf_prog_run(info->prog, info->ctx);
> -       rcu_read_unlock();
> +       info->retval = bpf_prog_run_trace(info->prog, 0, info->ctx, bpf_prog_run);
>  }
>
>  int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
> --
> 2.45.1
>




