[PATCH v1 bpf-next 2/2] selftests/bpf: Add test exercising mmapable task_local_storage

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This patch tests the mmapable task_local_storage functionality added
earlier in the series. The success tests focus on verifying correctness
of the various ways of reading from and writing to mmapable
task_local_storage:

  * Write through mmap'd region should be visible when BPF program
    makes bpf_task_storage_get call
  * If BPF program reads-and-incrs the mapval, the new value should be
    visible when userspace reads from mmap'd region or does
    map_lookup_elem call
  * If userspace does map_update_elem call, new value should be visible
    when userspace reads from mmap'd region or does map_lookup_elem
    call
  * After bpf_map_delete_elem, reading from mmap'd region should still
    succeed, but map_lookup_elem should fail with -ENOENT; only a
    bpf_task_storage_get w/ BPF_LOCAL_STORAGE_GET_F_CREATE recreates it
  * After bpf_map_delete_elem, creating a new map_val via mmap call
    should return a different memory region

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
 .../bpf/prog_tests/task_local_storage.c       | 177 ++++++++++++++++++
 .../bpf/progs/task_local_storage__mmap.c      |  59 ++++++
 .../bpf/progs/task_local_storage__mmap.h      |   7 +
 .../bpf/progs/task_local_storage__mmap_fail.c |  39 ++++
 4 files changed, 282 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/task_local_storage__mmap.c
 create mode 100644 tools/testing/selftests/bpf/progs/task_local_storage__mmap.h
 create mode 100644 tools/testing/selftests/bpf/progs/task_local_storage__mmap_fail.c

diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index ea8537c54413..08c589a12bd6 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -5,14 +5,19 @@
 #include <unistd.h>
 #include <sched.h>
 #include <pthread.h>
+#include <sys/mman.h>   /* For mmap and associated flags */
 #include <sys/syscall.h>   /* For SYS_xxx definitions */
 #include <sys/types.h>
 #include <test_progs.h>
+#include <network_helpers.h>
 #include "task_local_storage_helpers.h"
 #include "task_local_storage.skel.h"
 #include "task_local_storage_exit_creds.skel.h"
+#include "task_local_storage__mmap.skel.h"
+#include "task_local_storage__mmap_fail.skel.h"
 #include "task_ls_recursion.skel.h"
 #include "task_storage_nodeadlock.skel.h"
+#include "progs/task_local_storage__mmap.h"
 
 static void test_sys_enter_exit(void)
 {
@@ -40,6 +45,173 @@ static void test_sys_enter_exit(void)
 	task_local_storage__destroy(skel);
 }
 
+static int basic_mmapable_read_write(struct task_local_storage__mmap *skel,
+				     long *mmaped_task_local)
+{
+	int err;
+
+	*mmaped_task_local = 42; /* write through mmap'd region before attach */
+
+	err = task_local_storage__mmap__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		return -1;
+
+	syscall(SYS_gettid); /* trigger the tp_btf/sys_enter prog */
+	ASSERT_EQ(skel->bss->mmaped_mapval, 42, "mmaped_mapval");
+
+	/* Incr from userspace should be visible when BPF prog reads */
+	*mmaped_task_local = *mmaped_task_local + 1;
+	syscall(SYS_gettid);
+	ASSERT_EQ(skel->bss->mmaped_mapval, 43, "mmaped_mapval_user_incr");
+
+	/* Incr from BPF prog should be visible from userspace */
+	skel->bss->read_and_incr = 1;
+	syscall(SYS_gettid);
+	ASSERT_EQ(skel->bss->mmaped_mapval, 44, "mmaped_mapval_bpf_incr");
+	ASSERT_EQ(skel->bss->mmaped_mapval, *mmaped_task_local, "bpf_incr_eq");
+	skel->bss->read_and_incr = 0;
+
+	return 0;
+}
+
+static void test_sys_enter_mmap(void)
+{
+	struct task_local_storage__mmap *skel;
+	long *task_local, *task_local2, value;
+	int err, task_fd, map_fd;
+
+	task_local = task_local2 = MAP_FAILED;
+	task_fd = sys_pidfd_open(getpid(), 0);
+	if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
+		return;
+
+	skel = task_local_storage__mmap__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) {
+		close(task_fd);
+		return;
+	}
+
+	map_fd = bpf_map__fd(skel->maps.mmapable);
+	task_local = mmap(NULL, sizeof(long), PROT_READ | PROT_WRITE,
+			  MAP_SHARED, map_fd, 0);
+	if (!ASSERT_OK_PTR(task_local, "mmap_task_local_storage"))
+		goto out;
+
+	err = basic_mmapable_read_write(skel, task_local);
+	if (!ASSERT_OK(err, "basic_mmapable_read_write"))
+		goto out;
+
+	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
+	    !ASSERT_EQ(value, 44, "bpf_map_lookup_elem value"))
+		goto out;
+
+	value = 148;
+	err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_EXIST);
+	if (!ASSERT_OK(err, "bpf_map_update_elem") ||
+	    !ASSERT_EQ(READ_ONCE(*task_local), 148, "mmaped_read_after_update"))
+		goto out;
+	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
+	    !ASSERT_EQ(value, 148, "bpf_map_lookup_elem value"))
+		goto out;
+
+	/* The mmapable page is not released by map_delete_elem, but no longer
+	 * linked to local_storage
+	 */
+	err = bpf_map_delete_elem(map_fd, &task_fd);
+	if (!ASSERT_OK(err, "bpf_map_delete_elem") ||
+	    !ASSERT_EQ(READ_ONCE(*task_local), 148, "mmaped_read_after_delete"))
+		goto out;
+
+	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+	if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem_after_delete"))
+		goto out;
+
+	task_local_storage__mmap__destroy(skel);
+
+	/* The mmapable page is not released when __destroy unloads the map.
+	 * It will stick around until we munmap it
+	 */
+	*task_local = -999;
+
+	/* Although task_local's page is still around, it won't be reused */
+	skel = task_local_storage__mmap__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load2"))
+		goto out; /* skeleton __destroy tolerates NULL; don't leak fd */
+
+	map_fd = bpf_map__fd(skel->maps.mmapable);
+	err = task_local_storage__mmap__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach2"))
+		goto out;
+
+	skel->bss->read_and_incr = 1;
+	skel->bss->create_flag = BPF_LOCAL_STORAGE_GET_F_CREATE;
+	syscall(SYS_gettid);
+	ASSERT_EQ(skel->bss->mmaped_mapval, 1, "mmaped_mapval2");
+
+	skel->bss->read_and_incr = 0;
+	task_local2 = mmap(NULL, sizeof(long), PROT_READ | PROT_WRITE,
+			   MAP_SHARED, map_fd, 0);
+	if (!ASSERT_OK_PTR(task_local2, "mmap_task_local_storage2"))
+		goto out;
+
+	if (!ASSERT_NEQ(task_local, task_local2, "second_mmap_address"))
+		goto out;
+
+	ASSERT_EQ(READ_ONCE(*task_local2), 1, "mmaped_mapval2_bpf_create_incr");
+
+out:
+	close(task_fd);
+	if (task_local != MAP_FAILED)
+		munmap(task_local, sizeof(long));
+	if (task_local2 != MAP_FAILED)
+		munmap(task_local2, sizeof(long));
+	task_local_storage__mmap__destroy(skel);
+}
+
+static void test_sys_enter_mmap_big_mapval(void)
+{
+	struct two_page_struct *task_local, value;
+	struct task_local_storage__mmap *skel;
+	int task_fd, map_fd, err;
+
+	task_local = MAP_FAILED;
+	task_fd = sys_pidfd_open(getpid(), 0);
+	if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
+		return;
+
+	skel = task_local_storage__mmap__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) {
+		close(task_fd);
+		return;
+	}
+	map_fd = bpf_map__fd(skel->maps.mmapable_two_pages);
+	task_local = mmap(NULL, sizeof(struct two_page_struct),
+			  PROT_READ | PROT_WRITE, MAP_SHARED,
+			  map_fd, 0);
+	if (!ASSERT_OK_PTR(task_local, "mmap_task_local_storage"))
+		goto out;
+
+	skel->bss->use_big_mapval = 1;
+	err = basic_mmapable_read_write(skel, &task_local->val);
+	if (!ASSERT_OK(err, "basic_mmapable_read_write"))
+		goto out;
+
+	task_local->c[4096] = 'z'; /* last byte of c, lands on second page */
+
+	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
+	    !ASSERT_EQ(value.val, 44, "bpf_map_lookup_elem value"))
+		goto out;
+
+out:
+	close(task_fd);
+	if (task_local != MAP_FAILED)
+		munmap(task_local, sizeof(struct two_page_struct));
+	task_local_storage__mmap__destroy(skel);
+}
+
 static void test_exit_creds(void)
 {
 	struct task_local_storage_exit_creds *skel;
@@ -237,10 +409,15 @@ void test_task_local_storage(void)
 {
 	if (test__start_subtest("sys_enter_exit"))
 		test_sys_enter_exit();
+	if (test__start_subtest("sys_enter_mmap"))
+		test_sys_enter_mmap();
+	if (test__start_subtest("sys_enter_mmap_big_mapval"))
+		test_sys_enter_mmap_big_mapval();
 	if (test__start_subtest("exit_creds"))
 		test_exit_creds();
 	if (test__start_subtest("recursion"))
 		test_recursion();
 	if (test__start_subtest("nodeadlock"))
 		test_nodeadlock();
+	RUN_TESTS(task_local_storage__mmap_fail);
 }
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage__mmap.c b/tools/testing/selftests/bpf/progs/task_local_storage__mmap.c
new file mode 100644
index 000000000000..1c8850c8d189
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage__mmap.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "task_local_storage__mmap.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_MMAPABLE);
+	__type(key, int);
+	__type(value, long);
+} mmapable SEC(".maps"); /* single-long mapval */
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_MMAPABLE);
+	__type(key, int);
+	__type(value, struct two_page_struct);
+} mmapable_two_pages SEC(".maps"); /* mapval larger than one 4k page */
+
+long mmaped_mapval = 0; /* last value the prog read from storage */
+int read_and_incr = 0; /* if set, prog increments mapval before reading */
+int create_flag = 0; /* flags to pass to bpf_task_storage_get */
+int use_big_mapval = 0; /* if set, use mmapable_two_pages map */
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+	struct two_page_struct *big_mapval;
+	struct task_struct *task;
+	long *ptr;
+
+	task = bpf_get_current_task_btf();
+	if (!task)
+		return 1;
+
+	if (use_big_mapval) {
+		big_mapval = bpf_task_storage_get(&mmapable_two_pages, task, 0,
+						  create_flag);
+		if (!big_mapval)
+			return 2;
+		ptr = &big_mapval->val;
+	} else {
+		ptr = bpf_task_storage_get(&mmapable, task, 0, create_flag);
+	}
+
+	if (!ptr)
+		return 3;
+
+	if (read_and_incr)
+		*ptr = *ptr + 1;
+
+	mmaped_mapval = *ptr; /* publish mapval so userspace can check it */
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage__mmap.h b/tools/testing/selftests/bpf/progs/task_local_storage__mmap.h
new file mode 100644
index 000000000000..f4a3264142c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage__mmap.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+struct two_page_struct {
+	long val;
+	char c[4097]; /* pushes struct size past one 4k page */
+};
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage__mmap_fail.c b/tools/testing/selftests/bpf/progs/task_local_storage__mmap_fail.c
new file mode 100644
index 000000000000..f32c5bfe370a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage__mmap_fail.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_MMAPABLE);
+	__type(key, int);
+	__type(value, long);
+} mmapable SEC(".maps"); /* value_size=8, matches __msg below */
+
+__failure __msg("invalid access to map value, value_size=8 off=8 size=8") /* expect verifier rejection */
+SEC("tp_btf/sys_enter")
+long BPF_PROG(fail_read_past_mapval_end, struct pt_regs *regs, long id)
+{
+	struct task_struct *task;
+	long *ptr;
+	long res;
+
+	task = bpf_get_current_task_btf();
+	if (!task)
+		return 1;
+
+	ptr = bpf_task_storage_get(&mmapable, task, 0, 0);
+	if (!ptr)
+		return 3;
+	/* Although mmapable mapval is given an entire page, verifier shouldn't
+	 * allow r/w past end of 'long' type
+	 */
+	res = *(ptr + 1);
+
+	return res;
+}
-- 
2.34.1






[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux