Re: [PATCH v2 bpf-next 3/7] bpf: Add per-program recursion prevention mechanism

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sat, Feb 6, 2021 at 9:05 AM Alexei Starovoitov
<alexei.starovoitov@xxxxxxxxx> wrote:
>
> From: Alexei Starovoitov <ast@xxxxxxxxxx>
>
> Since both sleepable and non-sleepable programs execute under migrate_disable
> add recursion prevention mechanism to both types of programs when they're
> executed via bpf trampoline.
>
> Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
> ---
>  arch/x86/net/bpf_jit_comp.c                   | 15 +++++++++++++
>  include/linux/bpf.h                           |  6 ++---
>  include/linux/filter.h                        |  1 +
>  kernel/bpf/core.c                             |  8 +++++++
>  kernel/bpf/trampoline.c                       | 22 ++++++++++++++-----
>  .../selftests/bpf/prog_tests/fexit_stress.c   |  2 +-
>  .../bpf/prog_tests/trampoline_count.c         |  4 ++--
>  7 files changed, 47 insertions(+), 11 deletions(-)
>

[...]

> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index b1f567514b7e..226f613ab289 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
> @@ -388,16 +388,21 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
>   * call prog->bpf_func
>   * call __bpf_prog_exit
>   */
> -#define NO_START_TIME 0
> -u64 notrace __bpf_prog_enter(void)
> +#define NO_START_TIME 1
> +u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
>         __acquires(RCU)
>  {
>         u64 start = NO_START_TIME;
>
>         rcu_read_lock();
>         migrate_disable();
> -       if (static_branch_unlikely(&bpf_stats_enabled_key))
> +       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
> +               return 0;
> +       if (static_branch_unlikely(&bpf_stats_enabled_key)) {
>                 start = sched_clock();
> +               if (unlikely(!start))
> +                       start = NO_START_TIME;
> +       }
>         return start;
>  }
>
> @@ -425,25 +430,32 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
>         __releases(RCU)
>  {
>         update_prog_stats(prog, start);
> +       __this_cpu_dec(*(prog->active));
>         migrate_enable();
>         rcu_read_unlock();
>  }
>
> -u64 notrace __bpf_prog_enter_sleepable(void)
> +u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
>  {
>         u64 start = NO_START_TIME;
>
>         rcu_read_lock_trace();
>         migrate_disable();
>         might_fault();
> -       if (static_branch_unlikely(&bpf_stats_enabled_key))
> +       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
> +               return 0;
> +       if (static_branch_unlikely(&bpf_stats_enabled_key)) {
>                 start = sched_clock();
> +               if (unlikely(!start))
> +                       start = NO_START_TIME;
> +       }
>         return start;


maybe extract this piece into a helper function, so that the enter
functions would look like:

...
if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
         return 0;
return bpf_prog_start_time();

no need for u64 start initialization, more linear code, and no
duplication of logic?

Oh, and actually, given you have the `start > NO_START_TIME` condition in
the exit function, you don't need this `if (unlikely(!start))` bit at all,
because you are going to ignore both 0 and 1. So maybe there's no need for a
new function, and no need for the extra if either.

>  }
>
>  void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
>  {
>         update_prog_stats(prog, start);
> +       __this_cpu_dec(*(prog->active));
>         migrate_enable();
>         rcu_read_unlock_trace();
>  }
> diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> index 3b9dbf7433f0..4698b0d2de36 100644
> --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> @@ -3,7 +3,7 @@
>  #include <test_progs.h>
>
>  /* x86-64 fits 55 JITed and 43 interpreted progs into half page */

Is the comment above a bit outdated now, given that it forced you to decrease CNT?

> -#define CNT 40
> +#define CNT 38
>
>  void test_fexit_stress(void)
>  {
> diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> index 781c8d11604b..f3022d934e2d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> @@ -4,7 +4,7 @@
>  #include <sys/prctl.h>
>  #include <test_progs.h>
>
> -#define MAX_TRAMP_PROGS 40
> +#define MAX_TRAMP_PROGS 38
>
>  struct inst {
>         struct bpf_object *obj;
> @@ -52,7 +52,7 @@ void test_trampoline_count(void)
>         struct bpf_link *link;
>         char comm[16] = {};
>
> -       /* attach 'allowed' 40 trampoline programs */
> +       /* attach 'allowed' trampoline programs */
>         for (i = 0; i < MAX_TRAMP_PROGS; i++) {
>                 obj = bpf_object__open_file(object, NULL);
>                 if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
> --
> 2.24.1
>



[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux