[PATCH bpf-next v1 4/4] selftests/bpf: Add tests for kptr_ref refcounting

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Check at runtime how various operations on a kptr_ref affect its refcount,
and verify the observed value against the expected count. We use the per-CPU
prog_test_struct so the test has an isolated object whose refcount it can
inspect.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 .../selftests/bpf/prog_tests/map_kptr.c       |  28 ++++-
 tools/testing/selftests/bpf/progs/map_kptr.c  | 109 +++++++++++++++++-
 2 files changed, 132 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
index ffef3a319bac..bcee3e54e3ed 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include <network_helpers.h>
 
 #include "map_kptr.skel.h"
 #include "map_kptr_fail.skel.h"
@@ -81,8 +82,14 @@ static void test_map_kptr_fail(void)
 	}
 }
 
-static void test_map_kptr_success(void)
+static void test_map_kptr_success(bool test_run)
 {
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+		.cpu = 0,
+	);
 	struct map_kptr *skel;
 	int key = 0, ret;
 	char buf[24];
@@ -91,6 +98,16 @@ static void test_map_kptr_success(void)
 	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
 		return;
 
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
+	ASSERT_OK(ret, "test_map_kptr_ref refcount");
+	ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
+	ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
+	ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+
+	if (test_run)
+		return;
+
 	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
 	ASSERT_OK(ret, "array_map update");
 	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
@@ -116,7 +133,12 @@ static void test_map_kptr_success(void)
 
 void test_map_kptr(void)
 {
-	if (test__start_subtest("success"))
-		test_map_kptr_success();
+	if (test__start_subtest("success")) {
+		test_map_kptr_success(false);
+		/* Run the test_run-only pass a second time: the first pass left
+		 * a ref in the map, so this verifies the refcount drops back
+		 * to 1 once that map (and its kptr) is destroyed. */
+		test_map_kptr_success(true);
+	}
 	test_map_kptr_fail();
 }
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index 1b0e0409eaa5..569d7522bb9f 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -61,6 +61,7 @@ extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp
 extern struct prog_test_ref_kfunc *
 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+extern struct prog_test_ref_kfunc prog_test_struct __ksym;
 
 static void test_kptr_unref(struct map_value *v)
 {
@@ -141,7 +142,7 @@ SEC("tc")
 int test_map_kptr(struct __sk_buff *ctx)
 {
 	struct map_value *v;
-	int i, key = 0;
+	int key = 0;
 
 #define TEST(map)					\
 	v = bpf_map_lookup_elem(&map, &key);		\
@@ -162,7 +163,7 @@ SEC("tc")
 int test_map_in_map_kptr(struct __sk_buff *ctx)
 {
 	struct map_value *v;
-	int i, key = 0;
+	int key = 0;
 	void *map;
 
 #define TEST(map_in_map)                                \
@@ -187,4 +188,108 @@ int test_map_in_map_kptr(struct __sk_buff *ctx)
 	return 0;
 }
 
+SEC("tc")
+int test_map_kptr_ref(struct __sk_buff *ctx)
+{
+	struct prog_test_ref_kfunc *volatile p, *p_cpu;
+	unsigned long arg = 0;
+	struct map_value *v;
+	int key = 0, ret;
+
+	p_cpu = bpf_this_cpu_ptr(&prog_test_struct);
+	if (p_cpu->cnt.refs.counter != 1)
+		return 1;
+
+	p = bpf_kfunc_call_test_acquire(&arg);
+	if (!p)
+		return 2;
+	if (p != p_cpu || p_cpu->cnt.refs.counter != 2) {
+		ret = 3;
+		goto end;
+	}
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v) {
+		ret = 4;
+		goto end;
+	}
+
+	p = bpf_kptr_xchg(&v->ref_ptr, p);
+	if (p) {
+		ret = 5;
+		goto end;
+	}
+	if (p_cpu->cnt.refs.counter != 2)
+		return 6;
+
+	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+	if (!p)
+		return 7;
+	if (p_cpu->cnt.refs.counter != 3) {
+		ret = 8;
+		goto end;
+	}
+	bpf_kfunc_call_test_release(p);
+	if (p_cpu->cnt.refs.counter != 2)
+		return 9;
+
+	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+	if (!p)
+		return 10;
+	bpf_kfunc_call_test_release(p);
+	if (p_cpu->cnt.refs.counter != 1)
+		return 11;
+
+	p = bpf_kfunc_call_test_acquire(&arg);
+	if (!p)
+		return 12;
+	p = bpf_kptr_xchg(&v->ref_ptr, p);
+	if (p) {
+		ret = 13;
+		goto end;
+	}
+	if (p_cpu->cnt.refs.counter != 2)
+		return 14;
+	/* Leave in map */
+
+	return 0;
+end:
+	bpf_kfunc_call_test_release(p);
+	return ret;
+}
+
+SEC("tc")
+int test_map_kptr_ref2(struct __sk_buff *ctx)
+{
+	struct prog_test_ref_kfunc *volatile p, *p_cpu;
+	struct map_value *v;
+	int key = 0;
+
+	p_cpu = bpf_this_cpu_ptr(&prog_test_struct);
+	if (p_cpu->cnt.refs.counter != 2)
+		return 1;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 2;
+
+	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+	if (!p)
+		return 3;
+	if (p != p_cpu || p_cpu->cnt.refs.counter != 2) {
+		bpf_kfunc_call_test_release(p);
+		return 4;
+	}
+
+	p = bpf_kptr_xchg(&v->ref_ptr, p);
+	if (p) {
+		bpf_kfunc_call_test_release(p);
+		return 5;
+	}
+	if (p_cpu->cnt.refs.counter != 2)
+		return 6;
+
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.35.1




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux