Re: [PATCH v1 bpf-next 6/6] selftests/bpf: Add local kptr stashing test

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Mar 09, 2023 at 10:01:11AM -0800, Dave Marchevsky wrote:
> Add a new selftest, local_kptr_stash, which uses bpf_kptr_xchg to stash
> a bpf_obj_new-allocated object in a map.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
> ---
>  .../bpf/prog_tests/local_kptr_stash.c         | 33 +++++++
>  .../selftests/bpf/progs/local_kptr_stash.c    | 85 +++++++++++++++++++
>  2 files changed, 118 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
>  create mode 100644 tools/testing/selftests/bpf/progs/local_kptr_stash.c
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
> new file mode 100644
> index 000000000000..98353e602741
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
> @@ -0,0 +1,33 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#include <test_progs.h>
> +#include <network_helpers.h>
> +
> +#include "local_kptr_stash.skel.h"
> +static void test_local_kptr_stash_simple(void)
> +{
> +	LIBBPF_OPTS(bpf_test_run_opts, opts,
> +		    .data_in = &pkt_v4,
> +		    .data_size_in = sizeof(pkt_v4),
> +		    .repeat = 1,
> +	);
> +	struct local_kptr_stash *skel;
> +	int ret;
> +
> +	skel = local_kptr_stash__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
> +		return;
> +
> +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_node), &opts);
> +	ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
> +	ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
> +
> +	local_kptr_stash__destroy(skel);
> +}
> +
> +void test_local_kptr_stash_success(void)
> +{
> +	if (test__start_subtest("rbtree_add_nodes"))
> +		test_local_kptr_stash_simple();
> +}
> diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
> new file mode 100644
> index 000000000000..df7b419f3dc3
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
> @@ -0,0 +1,85 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
> +
> +#include <vmlinux.h>
> +#include <bpf/bpf_tracing.h>
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_core_read.h>
> +#include "bpf_experimental.h"
> +
> +struct node_data {
> +	long key;
> +	long data;
> +	struct bpf_rb_node node;
> +};
> +
> +struct map_value {
> +	struct prog_test_ref_kfunc *not_kptr;
> +	struct prog_test_ref_kfunc __kptr *val;
> +	struct node_data __kptr *node;
> +};
> +
> +/* This is necessary so that LLVM generates BTF for node_data struct
> + * If it's not included, a fwd reference for node_data will be generated but
> + * no struct. Example BTF of "node" field in map_value when not included:
> + *
> + * [10] PTR '(anon)' type_id=35
> + * [34] FWD 'node_data' fwd_kind=struct
> + * [35] TYPE_TAG 'kptr_ref' type_id=34
> + *
> + * (with no node_data struct defined)
> + * Had to do the same w/ bpf_kfunc_call_test_release below
> + */
> +struct node_data *just_here_because_btf_bug;
> +
> +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
> +
> +struct {
> +        __uint(type, BPF_MAP_TYPE_ARRAY);
> +        __type(key, int);
> +        __type(value, struct map_value);
> +        __uint(max_entries, 1);
> +} some_nodes SEC(".maps");
> +
> +SEC("tc")
> +long stash_rb_node(void *ctx)
> +{
> +	struct map_value *mapval;
> +	struct node_data *res;
> +	int key = 0;
> +
> +	res = bpf_obj_new(typeof(*res));
> +	if (!res)
> +		return 1;
> +	res->key = 42;
> +
> +	mapval = bpf_map_lookup_elem(&some_nodes, &key);
> +	if (!mapval) {
> +		bpf_obj_drop(res);
> +		return 1;
> +	}
> +
> +	res = bpf_kptr_xchg(&mapval->node, res);
> +	if (res)
> +		bpf_obj_drop(res);

Maybe add another tc prog with a 2nd bpf_prog_test_run_opts that does:
res = bpf_kptr_xchg(&mapval->node, NULL);
and bpf_obj_drop-s it for real?

The first stash_rb_node() can allocate two objs into key=0 and key=1;
the 2nd prog can bpf_kptr_xchg only one of them,
so we test both the dtor on map free and the explicit xchg+obj_drop.

wdyt?



[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux