Ensure that the output is consistent in the face of partial reads that
return to userspace and then resume later. To this end, we do reads in
1-byte chunks, which would be silly in real life, but works well to
simulate interrupted iteration. This also tests the case where the
seq_file buffer is consumed (after seq_printf) on an interrupted read
before the iterator invokes the BPF prog again.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 .../selftests/bpf/prog_tests/bpf_iter.c      | 33 ++++++++++++-------
 1 file changed, 22 insertions(+), 11 deletions(-)
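For reviewers, a minimal standalone sketch of the chunked-read pattern
the new __read_fd_into_buffer() helper follows; demo_read_chunked() and
the /proc file below are illustrative stand-ins, not part of the patch
(the selftests read a bpf_iter fd obtained from bpf_iter_create()
instead):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Read up to size bytes from fd, chunk bytes at a time (0 means
	 * "read as much as fits"). Each short read returns to userspace,
	 * so the kernel-side iterator must stop and later resume, while
	 * producing output identical to one uninterrupted read. Assumes
	 * chunk <= size.
	 */
	static int demo_read_chunked(int fd, char *buf, int size, size_t chunk)
	{
		int bufleft = size;
		int len;

		do {
			len = read(fd, buf, chunk ?: bufleft);
			if (len > 0) {
				buf += len;
				bufleft -= len;
			}
		} while (len > 0 && bufleft > 0);

		return len < 0 ? len : size - bufleft;
	}

	int main(void)
	{
		char buf[4096] = {};
		/* Any seq_file-backed fd demonstrates the pattern. */
		int fd = open("/proc/self/status", O_RDONLY);

		if (fd < 0)
			return 1;
		/* chunk == 1 forces a stop/resume after every byte. */
		if (demo_read_chunked(fd, buf, sizeof(buf) - 1, 1) > 0)
			fputs(buf, stdout);
		close(fd);
		return 0;
	}

With chunk == 1 every read() returns after a single byte, so the
iterator is torn down and resumed between bytes; the new *-partial
subtests assert that the concatenated output still matches what an
uninterrupted read would produce.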
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 7fb995deb22d..c7343a3f5155 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -73,13 +73,13 @@ static void do_dummy_read(struct bpf_program *prog)
 	bpf_link__destroy(link);
 }
 
-static int read_fd_into_buffer(int fd, char *buf, int size)
+static int __read_fd_into_buffer(int fd, char *buf, int size, size_t chunks)
 {
 	int bufleft = size;
 	int len;
 
 	do {
-		len = read(fd, buf, bufleft);
+		len = read(fd, buf, chunks ?: bufleft);
 		if (len > 0) {
 			buf += len;
 			bufleft -= len;
@@ -89,6 +89,11 @@ static int read_fd_into_buffer(int fd, char *buf, int size)
 	return len < 0 ? len : size - bufleft;
 }
 
+static int read_fd_into_buffer(int fd, char *buf, int size)
+{
+	return __read_fd_into_buffer(fd, buf, size, 0);
+}
+
 static void test_ipv6_route(void)
 {
 	struct bpf_iter_ipv6_route *skel;
@@ -1301,7 +1306,7 @@ static int io_uring_inode_match(int link_fd, int io_uring_fd)
 	return 0;
 }
 
-void test_io_uring_buf(void)
+void test_io_uring_buf(bool partial)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	char rbuf[4096], buf[4096] = "B\n";
@@ -1375,7 +1380,7 @@ void test_io_uring_buf(void)
 	if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create"))
 		goto end_close_fd;
 
-	ret = read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf));
+	ret = __read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf), partial);
 	if (!ASSERT_GT(ret, 0, "read_fd_into_buffer"))
 		goto end_close_iter;
 
@@ -1396,7 +1401,7 @@ void test_io_uring_buf(void)
 	bpf_iter_io_uring__destroy(skel);
 }
 
-void test_io_uring_file(void)
+void test_io_uring_file(bool partial)
 {
 	int reg_files[] = { [0 ... 7] = -1 };
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1464,7 +1469,7 @@ void test_io_uring_file(void)
 	if (!ASSERT_OK(ret, "io_uring_register_files"))
 		goto end_iter_fd;
 
-	ret = read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf));
+	ret = __read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf), partial);
 	if (!ASSERT_GT(ret, 0, "read_fd_into_buffer(iterator_fd, buf)"))
 		goto end_iter_fd;
 
@@ -1488,7 +1493,7 @@ void test_io_uring_file(void)
 	bpf_iter_io_uring__destroy(skel);
 }
 
-void test_epoll(void)
+void test_epoll(bool partial)
 {
 	const char *fmt = "B\npipe:%d\nsocket:%d\npipe:%d\nsocket:%d\nE\n";
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1554,7 +1559,7 @@ void test_epoll(void)
 	if (!ASSERT_GE(ret, 0, "snprintf") || !ASSERT_LT(ret, sizeof(buf), "snprintf"))
 		goto end_iter_fd;
 
-	ret = read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf));
+	ret = __read_fd_into_buffer(iter_fd, rbuf, sizeof(rbuf), partial);
 	if (!ASSERT_GT(ret, 0, "read_fd_into_buffer"))
 		goto end_iter_fd;
 
@@ -1666,9 +1671,15 @@ void test_bpf_iter(void)
 	if (test__start_subtest("buf-neg-offset"))
 		test_buf_neg_offset();
 	if (test__start_subtest("io_uring_buf"))
-		test_io_uring_buf();
+		test_io_uring_buf(false);
 	if (test__start_subtest("io_uring_file"))
-		test_io_uring_file();
+		test_io_uring_file(false);
 	if (test__start_subtest("epoll"))
-		test_epoll();
+		test_epoll(false);
+	if (test__start_subtest("io_uring_buf-partial"))
+		test_io_uring_buf(true);
+	if (test__start_subtest("io_uring_file-partial"))
+		test_io_uring_file(true);
+	if (test__start_subtest("epoll-partial"))
+		test_epoll(true);
 }
-- 
2.34.0