Re: [PATCH bpf-next v3 06/11] bpf: add arraymap support for bpf_for_each_map_elem() helper

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Feb 25, 2021 at 1:35 AM Yonghong Song <yhs@xxxxxx> wrote:
>
> This patch adds support for arraymap and percpu arraymap.
>
> Signed-off-by: Yonghong Song <yhs@xxxxxx>
> ---

The use of index_mask is overcautious in this case (see the note inline below), but otherwise this lgtm.

Acked-by: Andrii Nakryiko <andrii@xxxxxxxxxx>

>  kernel/bpf/arraymap.c | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
>
> diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> index 1f8453343bf2..4077a8ae7089 100644
> --- a/kernel/bpf/arraymap.c
> +++ b/kernel/bpf/arraymap.c
> @@ -625,6 +625,42 @@ static const struct bpf_iter_seq_info iter_seq_info = {
>         .seq_priv_size          = sizeof(struct bpf_iter_seq_array_map_info),
>  };
>
> +static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
> +                                  void *callback_ctx, u64 flags)
> +{
> +       u32 i, index, num_elems = 0;
> +       struct bpf_array *array;
> +       bool is_percpu;
> +       u64 ret = 0;
> +       void *val;
> +
> +       if (flags != 0)
> +               return -EINVAL;
> +
> +       is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
> +       array = container_of(map, struct bpf_array, map);
> +       if (is_percpu)
> +               migrate_disable();
> +       for (i = 0; i < map->max_entries; i++) {
> +               index = i & array->index_mask;

I don't think you need to use index_mask here, given you control i and
know that it will always be < map->max_entries.

> +               if (is_percpu)
> +                       val = this_cpu_ptr(array->pptrs[i]);
> +               else
> +                       val = array->value + array->elem_size * i;
> +               num_elems++;
> +               ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
> +                                       (u64)(long)&index, (u64)(long)val,
> +                                       (u64)(long)callback_ctx, 0);
> +               /* return value: 0 - continue, 1 - stop and return */
> +               if (ret)
> +                       break;
> +       }
> +
> +       if (is_percpu)
> +               migrate_enable();
> +       return num_elems;
> +}
> +
>  static int array_map_btf_id;
>  const struct bpf_map_ops array_map_ops = {
>         .map_meta_equal = array_map_meta_equal,
> @@ -643,6 +679,8 @@ const struct bpf_map_ops array_map_ops = {
>         .map_check_btf = array_map_check_btf,
>         .map_lookup_batch = generic_map_lookup_batch,
>         .map_update_batch = generic_map_update_batch,
> +       .map_set_for_each_callback_args = map_set_for_each_callback_args,
> +       .map_for_each_callback = bpf_for_each_array_elem,
>         .map_btf_name = "bpf_array",
>         .map_btf_id = &array_map_btf_id,
>         .iter_seq_info = &iter_seq_info,
> @@ -660,6 +698,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
>         .map_delete_elem = array_map_delete_elem,
>         .map_seq_show_elem = percpu_array_map_seq_show_elem,
>         .map_check_btf = array_map_check_btf,
> +       .map_set_for_each_callback_args = map_set_for_each_callback_args,
> +       .map_for_each_callback = bpf_for_each_array_elem,
>         .map_btf_name = "bpf_array",
>         .map_btf_id = &percpu_array_map_btf_id,
>         .iter_seq_info = &iter_seq_info,
> --
> 2.24.1
>



[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux