[PATCH v4 bpf-next 09/14] selftests/bpf: Add C tests for rdonly PTR_TO_BTF_ID

From: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>

Both the return value of bpf_kptr_xchg and loads of read-only kptrs must
be marked with MEM_RDONLY. Add C selftests covering both the accepted
usage of read-only kptrs and the cases the verifier must reject.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
Signed-off-by: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
---
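Not part of the patch: a minimal sketch, for illustration only, of the
verifier behaviour the new tests exercise. The map_value fields and the
kfuncs are the ones used by the selftests below; the wrapper function
name is hypothetical.

  static void rdonly_kptr_sketch(struct map_value *v)
  {
          const struct prog_test_ref_kfunc *pc;

          /* A load of a read-only kptr is marked MEM_RDONLY
           * (rdonly_untrusted_ptr_or_null_), so it may only be stored
           * back into a const kptr field.
           */
          pc = v->const_ref_ptr;
          v->const_unref_ptr = pc;

          /* The return value of bpf_kptr_xchg on a const kptr field is
           * also MEM_RDONLY (rdonly_ptr_or_null_); moving it into a
           * non-const field such as v->ref_ptr is rejected.
           */
          pc = bpf_kptr_xchg(&v->const_ref_ptr, NULL);
          if (pc)
                  /* the acquired reference must still be released */
                  bpf_kfunc_call_test_release(pc);
  }
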
 .../selftests/bpf/prog_tests/map_kptr.c       |   9 +-
 tools/testing/selftests/bpf/progs/map_kptr.c  |  31 ++++-
 .../selftests/bpf/progs/map_kptr_fail.c       | 114 ++++++++++++++++++
 3 files changed, 152 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
index fdcea7a61491..cca0bb51b752 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -36,6 +36,13 @@ struct {
 	{ "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
 	{ "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
 	{ "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
+	{ "kptr_const_to_non_const", "invalid kptr access, R0 type=rdonly_ptr_" },
+	{ "kptr_const_to_non_const_xchg", "invalid kptr access, R2 type=rdonly_ptr_" },
+	{ "kptr_const_or_null_to_non_const_xchg", "invalid kptr access, R2 type=rdonly_ptr_or_null_" },
+	{ "mark_rdonly", "R1 type=rdonly_untrusted_ptr_or_null_ expected=percpu_ptr_" },
+	{ "mark_ref_rdonly", "R1 type=rdonly_untrusted_ptr_or_null_ expected=percpu_ptr_" },
+	{ "mark_xchg_rdonly", "R1 type=rdonly_ptr_or_null_ expected=percpu_ptr_" },
+	{ "kptr_get_no_const", "arg#0 cannot raise reference for pointer to const" },
 };
 
 static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
@@ -91,7 +98,7 @@ static void test_map_kptr_success(bool test_run)
 	);
 	struct map_kptr *skel;
 	int key = 0, ret;
-	char buf[16];
+	char buf[32];
 
 	skel = map_kptr__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index eb8217803493..f77689544e65 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -6,6 +6,8 @@
 struct map_value {
 	struct prog_test_ref_kfunc __kptr *unref_ptr;
 	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+	const struct prog_test_ref_kfunc __kptr *const_unref_ptr;
+	const struct prog_test_ref_kfunc __kptr_ref *const_ref_ptr;
 };
 
 struct array_map {
@@ -58,12 +60,14 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo
 DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern const struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire_const(void) __ksym;
 extern struct prog_test_ref_kfunc *
 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+extern void bpf_kfunc_call_test_release(const struct prog_test_ref_kfunc *p) __ksym;
 
 static void test_kptr_unref(struct map_value *v)
 {
+	const struct prog_test_ref_kfunc *pc;
 	struct prog_test_ref_kfunc *p;
 
 	p = v->unref_ptr;
@@ -77,10 +81,21 @@ static void test_kptr_unref(struct map_value *v)
 	v->unref_ptr = p;
 	/* store NULL */
 	v->unref_ptr = NULL;
+
+	pc = v->const_ref_ptr;
+	/* store rdonly_untrusted_ptr_or_null_ */
+	v->const_unref_ptr = pc;
+	if (!pc)
+		return;
+	/* store rdonly_untrusted_ptr_ */
+	v->const_unref_ptr = pc;
+	/* store NULL */
+	v->const_unref_ptr = NULL;
 }
 
 static void test_kptr_ref(struct map_value *v)
 {
+	const struct prog_test_ref_kfunc *pc;
 	struct prog_test_ref_kfunc *p;
 
 	p = v->ref_ptr;
@@ -114,6 +129,20 @@ static void test_kptr_ref(struct map_value *v)
 		return;
 	}
 	bpf_kfunc_call_test_release(p);
+
+	pc = bpf_kptr_xchg(&v->const_ref_ptr, NULL);
+	if (!pc)
+		return;
+	/* store rdonly_ptr_ */
+	v->const_unref_ptr = pc;
+	bpf_kfunc_call_test_release(pc);
+
+	pc = bpf_kfunc_call_test_acquire_const();
+	if (!pc)
+		return;
+	v->const_unref_ptr = pc;
+	bpf_kfunc_call_test_release(pc);
+	v->const_unref_ptr = v->const_ref_ptr;
 }
 
 static void test_kptr_get(struct map_value *v)
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 05e209b1b12a..a1c4209a09e4 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -9,6 +9,8 @@ struct map_value {
 	struct prog_test_ref_kfunc __kptr *unref_ptr;
 	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
 	struct prog_test_member __kptr_ref *ref_memb_ptr;
+	const struct prog_test_ref_kfunc __kptr *const_unref_ptr;
+	const struct prog_test_ref_kfunc __kptr_ref *const_ref_ptr;
 };
 
 struct array_map {
@@ -19,6 +21,7 @@ struct array_map {
 } array_map SEC(".maps");
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern const struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire_const(void) __ksym;
 extern struct prog_test_ref_kfunc *
 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 
@@ -415,4 +418,115 @@ int kptr_get_ref_state(struct __sk_buff *ctx)
 	return 0;
 }
 
+SEC("?tc")
+int kptr_const_to_non_const(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+	if (!p)
+		return 0;
+
+	v->unref_ptr = (void *)p;
+	return 0;
+}
+
+SEC("?tc")
+int kptr_const_to_non_const_xchg(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+	if (!p)
+		return 0;
+
+	bpf_kptr_xchg(&v->ref_ptr, p);
+	return 0;
+}
+
+SEC("?tc")
+int kptr_const_or_null_to_non_const_xchg(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+
+	bpf_kptr_xchg(&v->ref_ptr, p);
+	return 0;
+}
+
+SEC("?tc")
+int mark_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(v->const_unref_ptr);
+	return 0;
+}
+
+SEC("?tc")
+int mark_ref_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(v->const_ref_ptr);
+	return 0;
+}
+
+SEC("?tc")
+int mark_xchg_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(bpf_kptr_xchg(&v->const_ref_ptr, NULL));
+	return 0;
+}
+
+SEC("?tc")
+int kptr_get_no_const(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_kfunc_call_test_kptr_get((void *)&v->const_ref_ptr, 0, 0);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.35.3



