Add a test file for hugepage registered buffers, to make sure the
fixed buffer coalescing feature works safely and soundly. Test cases
include read/write with single/multiple/unaligned/non-2MB hugepage
fixed buffers, plus a should-not-coalesce case where the buffer is a
mixture of differently sized pages.

-----
Changes since v1:
1. Added unaligned/non-2MB hugepage/page mixture test cases.
2. Rearranged the code.

v1: https://lore.kernel.org/io-uring/20240514051343.582556-1-cliang01.li@xxxxxxxxxxx/T/#u

Signed-off-by: Chenliang Li <cliang01.li@xxxxxxxxxxx>
---
 test/Makefile         |   1 +
 test/fixed-hugepage.c | 391 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 392 insertions(+)
 create mode 100644 test/fixed-hugepage.c

diff --git a/test/Makefile b/test/Makefile
index 94bdc25..364514d 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -88,6 +88,7 @@ test_srcs := \
 	file-update.c \
 	file-verify.c \
 	fixed-buf-iter.c \
+	fixed-hugepage.c \
 	fixed-link.c \
 	fixed-reuse.c \
 	fpos.c \
diff --git a/test/fixed-hugepage.c b/test/fixed-hugepage.c
new file mode 100644
index 0000000..a5a0947
--- /dev/null
+++ b/test/fixed-hugepage.c
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Test fixed buffers consisting of hugepages.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <linux/mman.h>
+#include <sys/shm.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+/*
+ * Before testing:
+ * echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
+ * echo always > /sys/kernel/mm/transparent_hugepage/hugepages-16kB/enabled
+ * echo 8 > /proc/sys/vm/nr_hugepages (the MAP_HUGETLB cases need a hugetlb pool)
+ * Not 100% guaranteed to get THP-backed memory, but in general it does.
+ */
+#define MTHP_16KB (16UL * 1024)
+#define HUGEPAGE_SIZE (2UL * 1024 * 1024)
+#define NR_BUFS 1
+#define IN_FD "/dev/urandom"
+#define OUT_FD "/dev/zero"
+
+static int open_files(int *fd_in, int *fd_out)
+{
+	*fd_in = open(IN_FD, O_RDONLY, 0644);
+	if (*fd_in < 0) {
+		perror("open in");
+		return -1;
+	}
+
+	*fd_out = open(OUT_FD, O_RDWR, 0644);
+	if (*fd_out < 0) {
+		perror("open out");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void unmap(struct iovec *iov, int nr_bufs, size_t offset)
+{
+	int i;
+
+	for (i = 0; i < nr_bufs; i++)
+		munmap(iov[i].iov_base - offset, iov[i].iov_len + offset);
+
+	return;
+}
+
+static int mmap_hugebufs(struct iovec *iov, int nr_bufs, size_t buf_size, size_t offset)
+{
+	int i;
+
+	for (i = 0; i < nr_bufs; i++) {
+		void *base = NULL;
+
+		base = mmap(NULL, buf_size, PROT_READ | PROT_WRITE,
+			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+		if (base == MAP_FAILED) {
+			fprintf(stderr, "Error mmapping the %dth buffer: %s\n", i, strerror(errno));
+			unmap(iov, i, offset);
+			return -1;
+		}
+
+		memset(base, 0, buf_size);
+		iov[i].iov_base = base + offset;
+		iov[i].iov_len = buf_size - offset;
+	}
+
+	return 0;
+}
+
+/* map a hugepage and a smaller page into contiguous memory */
+static int mmap_mixture(struct iovec *iov, int nr_bufs, size_t buf_size)
+{
+	int i;
+	void *small_base = NULL, *huge_base = NULL, *start = NULL;
+	size_t small_size = buf_size - HUGEPAGE_SIZE;
+	size_t seg_size = ((buf_size / HUGEPAGE_SIZE) + 1) * HUGEPAGE_SIZE;
+
+	start = mmap(NULL, seg_size * nr_bufs, PROT_NONE,
+		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+	if (start == MAP_FAILED) {
+		fprintf(stderr, "Reserving contiguous memory for page mixture failed.\n");
+		return -1;
+	}
+
+	for (i = 0; i < nr_bufs; i++) {
+		huge_base = mmap(start, HUGEPAGE_SIZE, PROT_READ | PROT_WRITE,
+				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED, -1, 0);
+		if (huge_base == MAP_FAILED) {
+			fprintf(stderr, "Error mapping the %dth huge page in mixture: %s\n", i, strerror(errno));
+			unmap(iov, i, 0); /* only the first i bufs are filled so far */
+			return -1;
+		}
+
+		small_base = mmap(start + HUGEPAGE_SIZE, small_size, PROT_READ | PROT_WRITE,
+				  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+		if (small_base == MAP_FAILED) {
+			fprintf(stderr, "Error mapping the %dth small page in mixture: %s\n", i, strerror(errno));
+			unmap(iov, i, 0);
+			return -1;
+		}
+
+		memset(huge_base, 0, buf_size); /* the two mappings are contiguous */
+		iov[i].iov_base = huge_base;
+		iov[i].iov_len = buf_size;
+		start += seg_size;
+	}
+
+	return 0;
+}
+
+static void free_bufs(struct iovec *iov, int nr_bufs, size_t offset)
+{
+	int i;
+
+	for (i = 0; i < nr_bufs; i++)
+		free(iov[i].iov_base - offset);
+
+	return;
+}
+
+static int get_mthp_bufs(struct iovec *iov, int nr_bufs, size_t buf_size,
+			 size_t alignment, size_t offset)
+{
+	int i;
+
+	for (i = 0; i < nr_bufs; i++) {
+		void *base = NULL;
+
+		if (posix_memalign(&base, alignment, buf_size)) {
+			fprintf(stderr, "Failed to allocate the %dth MTHP_16KB buf\n", i);
+			free_bufs(iov, i, offset);
+			return -1;
+		}
+
+		memset(base, 0, buf_size);
+		iov[i].iov_base = base + offset;
+		iov[i].iov_len = buf_size - offset;
+	}
+
+	return 0;
+}
+
+static int do_read(struct io_uring *ring, int fd, struct iovec *iov, int nr_bufs)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int i, ret;
+
+	for (i = 0; i < nr_bufs; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "Could not get SQE.\n");
+			return -1;
+		}
+
+		io_uring_prep_read_fixed(sqe, fd, iov[i].iov_base, iov[i].iov_len, 0, i);
+		io_uring_submit(ring);
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "Error waiting for completion: %s\n", strerror(-ret));
+			return -1;
+		}
+
+		if (cqe->res < 0) {
+			fprintf(stderr, "Error in async read operation: %s\n", strerror(-cqe->res));
+			return -1;
+		}
+		if (cqe->res != iov[i].iov_len) {
+			fprintf(stderr, "cqe res: %d, expected: %lu\n", cqe->res, iov[i].iov_len);
+			return -1;
+		}
+
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+static int do_write(struct io_uring *ring, int fd, struct iovec *iov, int nr_bufs)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int i, ret;
+
+	for (i = 0; i < nr_bufs; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "Could not get SQE.\n");
+			return -1;
+		}
+
+		io_uring_prep_write_fixed(sqe, fd, iov[i].iov_base, iov[i].iov_len, 0, i);
+		io_uring_submit(ring);
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "Error waiting for completion: %s\n", strerror(-ret));
+			return -1;
+		}
+
+		if (cqe->res < 0) {
+			fprintf(stderr, "Error in async write operation: %s\n", strerror(-cqe->res));
+			return -1;
+		}
+		if (cqe->res != iov[i].iov_len) {
+			fprintf(stderr, "cqe res: %d, expected: %lu\n", cqe->res, iov[i].iov_len);
+			return -1;
+		}
+
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+static int register_submit(struct io_uring *ring, struct iovec *iov,
+			   int nr_bufs, int fd_in, int fd_out)
+{
+	int ret;
+
+	ret = io_uring_register_buffers(ring, iov, nr_bufs);
+	if (ret) {
+		fprintf(stderr, "Error registering buffers: %s\n", strerror(-ret));
+		return ret;
+	}
+
+	ret = do_read(ring, fd_in, iov, nr_bufs);
+	if (ret) {
+		fprintf(stderr, "Read test failed\n");
+		return ret;
+	}
+
+	ret = do_write(ring, fd_out, iov, nr_bufs);
+	if (ret) {
+		fprintf(stderr, "Write test failed\n");
+		return ret;
+	}
+
+	ret = io_uring_unregister_buffers(ring);
+	if (ret) {
+		fprintf(stderr, "Error unregistering buffers: %s\n", strerror(-ret));
+		return ret;
+	}
+
+	return 0;
+}
+
+static int test_one_hugepage(struct io_uring *ring, int fd_in, int fd_out)
+{
+	struct iovec iov[NR_BUFS];
+	size_t buf_size = HUGEPAGE_SIZE;
+	int ret;
+
+	if (mmap_hugebufs(iov, NR_BUFS, buf_size, 0)) {
+		fprintf(stderr, "Skipping one hugepage test.\n");
+		return 0;
+	}
+
+	ret = register_submit(ring, iov, NR_BUFS, fd_in, fd_out);
+	unmap(iov, NR_BUFS, 0);
+	return ret;
+}
+
+static int test_multi_hugepages(struct io_uring *ring, int fd_in, int fd_out)
+{
+	struct iovec iov[NR_BUFS];
+	size_t buf_size = 4 * HUGEPAGE_SIZE;
+	int ret;
+
+	if (mmap_hugebufs(iov, NR_BUFS, buf_size, 0)) {
+		fprintf(stderr, "Skipping multi hugepages test.\n");
+		return 0;
+	}
+
+	ret = register_submit(ring, iov, NR_BUFS, fd_in, fd_out);
+	unmap(iov, NR_BUFS, 0);
+	return ret;
+}
+
+static int test_unaligned_hugepage(struct io_uring *ring, int fd_in, int fd_out)
+{
+	struct iovec iov[NR_BUFS];
+	size_t buf_size = 3 * HUGEPAGE_SIZE;
+	size_t offset = 0x1234;
+	int ret;
+
+	if (mmap_hugebufs(iov, NR_BUFS, buf_size, offset)) {
+		fprintf(stderr, "Skipping unaligned hugepage test.\n");
+		return 0;
+	}
+
+	ret = register_submit(ring, iov, NR_BUFS, fd_in, fd_out);
+	unmap(iov, NR_BUFS, offset);
+	return ret;
+}
+
+static int test_multi_unaligned_mthps(struct io_uring *ring, int fd_in, int fd_out)
+{
+	struct iovec iov[NR_BUFS];
+	int ret;
+	size_t buf_size = 3 * MTHP_16KB;
+	size_t offset = 0x1234;
+
+	if (get_mthp_bufs(iov, NR_BUFS, buf_size, MTHP_16KB, offset)) {
+		fprintf(stderr, "Skipping multi-sized transparent hugepages test.\n");
+		return 0;
+	}
+
+	ret = register_submit(ring, iov, NR_BUFS, fd_in, fd_out);
+	free_bufs(iov, NR_BUFS, offset);
+	return ret;
+}
+
+/* Should not coalesce */
+static int test_page_mixture(struct io_uring *ring, int fd_in, int fd_out)
+{
+	struct iovec iov[NR_BUFS];
+	size_t buf_size = HUGEPAGE_SIZE + MTHP_16KB;
+	int ret;
+
+	if (mmap_mixture(iov, NR_BUFS, buf_size)) {
+		fprintf(stderr, "Skipping page mixture test.\n");
+		return 0;
+	}
+
+	ret = register_submit(ring, iov, NR_BUFS, fd_in, fd_out);
+	unmap(iov, NR_BUFS, 0);
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret, fd_in, fd_out;
+
+	if (argc > 1)
+		return T_EXIT_SKIP;
+
+	if (open_files(&fd_in, &fd_out))
+		return T_EXIT_FAIL;
+
+	ret = t_create_ring(8, &ring, 0);
+	if (ret == T_SETUP_SKIP)
+		return T_EXIT_SKIP;
+	else if (ret < 0)
+		return T_EXIT_FAIL;
+
+	ret = test_one_hugepage(&ring, fd_in, fd_out);
+	if (ret) {
+		fprintf(stderr, "Test one hugepage failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_multi_hugepages(&ring, fd_in, fd_out);
+	if (ret) {
+		fprintf(stderr, "Test multi hugepages failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_unaligned_hugepage(&ring, fd_in, fd_out);
+	if (ret) {
+		fprintf(stderr, "Test unaligned huge page failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_multi_unaligned_mthps(&ring, fd_in, fd_out);
+	if (ret) {
+		fprintf(stderr, "Test multi unaligned MTHP_16KB huge pages failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_page_mixture(&ring, fd_in, fd_out);
+	if (ret) {
+		fprintf(stderr, "Test page mixture failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_queue_exit(&ring);
+	return T_EXIT_PASS;
+}
-- 
2.34.1