Re: [PATCH v4 bpf-next 2/3] bpf: Introduce task_vma open-coded iterator kfuncs

On Mon, Oct 2, 2023 at 12:53 PM Dave Marchevsky <davemarchevsky@xxxxxx> wrote:
>
> This patch adds kfuncs bpf_iter_task_vma_{new,next,destroy}, which allow
> creation and manipulation of struct bpf_iter_task_vma in open-coded
> iterator style. BPF programs can use these kfuncs directly or through the
> bpf_for_each macro for natural-looking iteration over all of a task's vmas.
>
> The implementation borrows heavily from the bpf_find_vma helper's locking,
> differing only in that it holds the mmap_read lock for all iterations,
> while the helper only executes its provided callback on at most one vma.
> Aside from locking, struct vma_iterator and vma_next do all the heavy
> lifting.
>
> The newly-added struct bpf_iter_task_vma has a name collision with a
> selftest for the seq_file task_vma iterator's BPF skeleton, so the
> selftests/bpf/progs file is renamed to avoid the collision.
>
> A pointer to an inner data struct, struct bpf_iter_task_vma_kern_data, is
> the only field in struct bpf_iter_task_vma_kern, the non-opaque view of
> struct bpf_iter_task_vma. This is because the inner data struct contains a
> struct vma_iterator (not a pointer), whose size is likely to change under
> us. If bpf_iter_task_vma_kern contained the vma_iterator directly, such a
> change would require a change in the opaque bpf_iter_task_vma struct's
> size. So it is better to allocate the vma_iterator using the BPF allocator,
> and since that allocation must succeed anyway, we might as well allocate
> all iter fields there, thereby freezing struct bpf_iter_task_vma's size.
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
> Cc: Nathan Slingerland <slinger@xxxxxxxx>
> ---
>  kernel/bpf/helpers.c                          |  3 +
>  kernel/bpf/task_iter.c                        | 85 +++++++++++++++++++
>  tools/lib/bpf/bpf_helpers.h                   |  8 ++
>  .../selftests/bpf/prog_tests/bpf_iter.c       | 26 +++---
>  ...f_iter_task_vma.c => bpf_iter_task_vmas.c} |  0
>  5 files changed, 109 insertions(+), 13 deletions(-)
>  rename tools/testing/selftests/bpf/progs/{bpf_iter_task_vma.c => bpf_iter_task_vmas.c} (100%)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index dd1c69ee3375..6b2373db65bd 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2549,6 +2549,9 @@ BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
>  BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
> +BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
> +BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
> +BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
>  BTF_ID_FLAGS(func, bpf_dynptr_adjust)
>  BTF_ID_FLAGS(func, bpf_dynptr_is_null)
>  BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
> index 7473068ed313..5c8e559be5e5 100644
> --- a/kernel/bpf/task_iter.c
> +++ b/kernel/bpf/task_iter.c
> @@ -7,7 +7,9 @@
>  #include <linux/fs.h>
>  #include <linux/fdtable.h>
>  #include <linux/filter.h>
> +#include <linux/bpf_mem_alloc.h>
>  #include <linux/btf_ids.h>
> +#include <linux/mm_types.h>
>  #include "mmap_unlock_work.h"
>
>  static const char * const iter_task_type_names[] = {
> @@ -803,6 +805,89 @@ const struct bpf_func_proto bpf_find_vma_proto = {
>         .arg5_type      = ARG_ANYTHING,
>  };
>
> +struct bpf_iter_task_vma_kern_data {
> +       struct task_struct *task;
> +       struct mm_struct *mm;
> +       struct mmap_unlock_irq_work *work;
> +       struct vma_iterator vmi;
> +};
> +
> +struct bpf_iter_task_vma {
> +       /* opaque iterator state; having __u64 here allows to preserve correct
> +        * alignment requirements in vmlinux.h, generated from BTF
> +        */
> +       __u64 __opaque[1];
> +} __attribute__((aligned(8)));
> +
> +/* Non-opaque version of bpf_iter_task_vma */
> +struct bpf_iter_task_vma_kern {
> +       struct bpf_iter_task_vma_kern_data *data;
> +} __attribute__((aligned(8)));
> +
> +__bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
> +                                     struct task_struct *task, u64 addr)
> +{
> +       struct bpf_iter_task_vma_kern *kit = (void *)it;
> +       bool irq_work_busy = false;
> +       int err;
> +
> +       BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
> +       BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
> +
> +       /* is_iter_reg_valid_uninit guarantees that kit hasn't been initialized
> +        * before, so non-NULL kit->data doesn't point to previously
> +        * bpf_mem_alloc'd bpf_iter_task_vma_kern_data
> +        */
> +       kit->data = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_iter_task_vma_kern_data));
> +       if (!kit->data)
> +               return -ENOMEM;
> +
> +       kit->data->task = get_task_struct(task);
> +       kit->data->mm = task->mm;
> +       if (!kit->data->mm) {
> +               err = -ENOENT;
> +               goto err_cleanup_iter;
> +       }
> +
> +       /* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
> +       irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
> +       if (irq_work_busy || !mmap_read_trylock(kit->data->mm)) {
> +               err = -EBUSY;
> +               goto err_cleanup_iter;
> +       }
> +
> +       vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
> +       return 0;
> +
> +err_cleanup_iter:
> +       if (kit->data->task)
> +               put_task_struct(kit->data->task);
> +       bpf_mem_free(&bpf_global_ma, kit->data);
> +       /* NULL kit->data signals failed bpf_iter_task_vma initialization */
> +       kit->data = NULL;
> +       return err;
> +}
> +
> +__bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
> +{
> +       struct bpf_iter_task_vma_kern *kit = (void *)it;
> +
> +       if (!kit->data) /* bpf_iter_task_vma_new failed */
> +               return NULL;
> +       return vma_next(&kit->data->vmi);
> +}
> +
> +__bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
> +{
> +       struct bpf_iter_task_vma_kern *kit = (void *)it;
> +
> +       if (kit->data) {
> +               bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
> +               put_task_struct(kit->data->task);
> +               bpf_mem_free(&bpf_global_ma, kit->data);
> +       }
> +}
> +
>  DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
>
>  static void do_mmap_read_unlock(struct irq_work *entry)
> diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
> index 77ceea575dc7..c6abb7fd8d73 100644
> --- a/tools/lib/bpf/bpf_helpers.h
> +++ b/tools/lib/bpf/bpf_helpers.h
> @@ -303,6 +303,14 @@ extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak
>  extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
>  extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
>
> +struct bpf_iter_task_vma;
> +
> +extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
> +                                struct task_struct *task,
> +                                unsigned long addr) __weak __ksym;
> +extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __weak __ksym;
> +extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __weak __ksym;

hm... these kfunc declarations are still in bpf_helpers.h

Also, let's split this change out from the kernel changes; there is no
need to couple the bpf_helpers.h update with the kernel patches.
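
For readers following along, BPF-side usage would look roughly like the
sketch below. This is not taken from the series; the attach point, program
name, and the global counter are illustrative assumptions. The
bpf_for_each(task_vma, ...) form is what these declarations enable: it
expands to bpf_iter_task_vma_new(&it, task, 0), repeated
bpf_iter_task_vma_next(&it) calls, and bpf_iter_task_vma_destroy(&it) when
the loop scope is left.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

__u64 total_vm_bytes;

SEC("raw_tp/sys_enter")	/* assumed attach point, for illustration only */
int sum_task_vma_sizes(const void *ctx)
{
	/* trusted task pointer, satisfying KF_RCU on bpf_iter_task_vma_new() */
	struct task_struct *task = bpf_get_current_task_btf();
	struct vm_area_struct *vma;

	/* walk all vmas of the current task, starting from address 0 */
	bpf_for_each(task_vma, vma, task, 0)
		total_vm_bytes += vma->vm_end - vma->vm_start;

	return 0;
}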

> +
>  #ifndef bpf_for_each
>  /* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
>   * using BPF open-coded iterators without having to write mundane explicit
> diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
> index 1f02168103dd..41aba139b20b 100644
> --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
> +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
> @@ -10,7 +10,7 @@
>  #include "bpf_iter_task.skel.h"
>  #include "bpf_iter_task_stack.skel.h"
>  #include "bpf_iter_task_file.skel.h"
> -#include "bpf_iter_task_vma.skel.h"
> +#include "bpf_iter_task_vmas.skel.h"
>  #include "bpf_iter_task_btf.skel.h"
>  #include "bpf_iter_tcp4.skel.h"
>  #include "bpf_iter_tcp6.skel.h"
> @@ -1399,19 +1399,19 @@ static void str_strip_first_line(char *str)
>  static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
>  {
>         int err, iter_fd = -1, proc_maps_fd = -1;
> -       struct bpf_iter_task_vma *skel;
> +       struct bpf_iter_task_vmas *skel;
>         int len, read_size = 4;
>         char maps_path[64];
>
> -       skel = bpf_iter_task_vma__open();
> -       if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
> +       skel = bpf_iter_task_vmas__open();
> +       if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
>                 return;
>
>         skel->bss->pid = getpid();
>         skel->bss->one_task = opts ? 1 : 0;
>
> -       err = bpf_iter_task_vma__load(skel);
> -       if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
> +       err = bpf_iter_task_vmas__load(skel);
> +       if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
>                 goto out;
>
>         skel->links.proc_maps = bpf_program__attach_iter(
> @@ -1462,25 +1462,25 @@ static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
>  out:
>         close(proc_maps_fd);
>         close(iter_fd);
> -       bpf_iter_task_vma__destroy(skel);
> +       bpf_iter_task_vmas__destroy(skel);
>  }
>
>  static void test_task_vma_dead_task(void)
>  {
> -       struct bpf_iter_task_vma *skel;
> +       struct bpf_iter_task_vmas *skel;
>         int wstatus, child_pid = -1;
>         time_t start_tm, cur_tm;
>         int err, iter_fd = -1;
>         int wait_sec = 3;
>
> -       skel = bpf_iter_task_vma__open();
> -       if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
> +       skel = bpf_iter_task_vmas__open();
> +       if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
>                 return;
>
>         skel->bss->pid = getpid();
>
> -       err = bpf_iter_task_vma__load(skel);
> -       if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
> +       err = bpf_iter_task_vmas__load(skel);
> +       if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
>                 goto out;
>
>         skel->links.proc_maps = bpf_program__attach_iter(
> @@ -1533,7 +1533,7 @@ static void test_task_vma_dead_task(void)
>  out:
>         waitpid(child_pid, &wstatus, 0);
>         close(iter_fd);
> -       bpf_iter_task_vma__destroy(skel);
> +       bpf_iter_task_vmas__destroy(skel);
>  }
>
>  void test_bpf_sockmap_map_iter_fd(void)
> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
> similarity index 100%
> rename from tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c
> rename to tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c

let's do this rename in a separate pre-patch?

> --
> 2.34.1
>
