Add tests for memory mapping and NUMA memory policy binding in
guest_memfd. This extends the existing selftests by adding proper
validation for:

- Basic mmap() functionality
- KVM GMEM set_policy and get_policy() vm_ops functionality using
  mbind() and get_mempolicy()
- NUMA policy application before and after memory allocation

These tests help ensure NUMA support for guest_memfd works correctly.

Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
---
 .../testing/selftests/kvm/guest_memfd_test.c | 86 ++++++++++++++++++-
 1 file changed, 82 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index ce687f8d248f..b9c845cc41e0 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -13,9 +13,11 @@
 
 #include <linux/bitmap.h>
 #include <linux/falloc.h>
+#include <linux/mempolicy.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
 
 #include "kvm_util.h"
 #include "test_util.h"
@@ -34,12 +36,86 @@ static void test_file_read_write(int fd)
 		    "pwrite on a guest_mem fd should fail");
 }
 
-static void test_mmap(int fd, size_t page_size)
+static void test_mmap(int fd, size_t page_size, size_t total_size)
 {
 	char *mem;
 
-	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT_EQ(mem, MAP_FAILED);
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmap should succeed");
+	TEST_ASSERT(munmap(mem, total_size) == 0, "munmap should succeed");
+}
+
+static void test_mbind(int fd, size_t page_size, size_t total_size)
+{
+	unsigned long nodemask = 1;	/* nid: 0 */
+	unsigned long maxnode = 8;
+	unsigned long get_nodemask;
+	int get_policy;
+	void *mem;
+	int ret;
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmap for mbind test should succeed");
+
+	/* Test MPOL_INTERLEAVE policy */
+	ret = syscall(__NR_mbind, mem, page_size * 2, MPOL_INTERLEAVE,
+		      &nodemask, maxnode, 0);
+	TEST_ASSERT(!ret, "mbind with INTERLEAVE to node 0 should succeed");
+	ret = syscall(__NR_get_mempolicy, &get_policy, &get_nodemask,
+		      maxnode, mem, MPOL_F_ADDR);
+	TEST_ASSERT(!ret && get_policy == MPOL_INTERLEAVE && get_nodemask == nodemask,
+		    "Policy should be MPOL_INTERLEAVE and nodes match");
+
+	/* Test basic MPOL_BIND policy */
+	ret = syscall(__NR_mbind, mem + page_size * 2, page_size * 2, MPOL_BIND,
+		      &nodemask, maxnode, 0);
+	TEST_ASSERT(!ret, "mbind with MPOL_BIND to node 0 should succeed");
+	ret = syscall(__NR_get_mempolicy, &get_policy, &get_nodemask,
+		      maxnode, mem + page_size * 2, MPOL_F_ADDR);
+	TEST_ASSERT(!ret && get_policy == MPOL_BIND && get_nodemask == nodemask,
+		    "Policy should be MPOL_BIND and nodes match");
+
+	/* Test MPOL_DEFAULT policy */
+	ret = syscall(__NR_mbind, mem, total_size, MPOL_DEFAULT, NULL, 0, 0);
+	TEST_ASSERT(!ret, "mbind with MPOL_DEFAULT should succeed");
+	ret = syscall(__NR_get_mempolicy, &get_policy, &get_nodemask,
+		      maxnode, mem, MPOL_F_ADDR);
+	TEST_ASSERT(!ret && get_policy == MPOL_DEFAULT && get_nodemask == 0,
+		    "Policy should be MPOL_DEFAULT and nodes zero");
+
+	/* Test with invalid policy */
+	ret = syscall(__NR_mbind, mem, page_size, 999, &nodemask, maxnode, 0);
+	TEST_ASSERT(ret == -1 && errno == EINVAL,
+		    "mbind with invalid policy should fail with EINVAL");
+
+	TEST_ASSERT(munmap(mem, total_size) == 0, "munmap should succeed");
+}
+
+static void test_numa_allocation(int fd, size_t page_size, size_t total_size)
+{
+	unsigned long nodemask = 1;	/* Node 0 */
+	unsigned long maxnode = 8;
+	void *mem;
+	int ret;
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmap should succeed");
+
+	/* Set NUMA policy after allocation */
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, page_size * 2);
+	TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");
+	ret = syscall(__NR_mbind, mem, page_size * 2, MPOL_BIND, &nodemask,
+		      maxnode, 0);
+	TEST_ASSERT(!ret, "mbind should succeed");
+
+	/* Set NUMA policy before allocation */
+	ret = syscall(__NR_mbind, mem + page_size * 2, page_size, MPOL_BIND,
+		      &nodemask, maxnode, 0);
+	TEST_ASSERT(!ret, "mbind should succeed");
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, page_size * 2, page_size * 2);
+	TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");
+
+	TEST_ASSERT(munmap(mem, total_size) == 0, "munmap should succeed");
 }
 
 static void test_file_size(int fd, size_t page_size, size_t total_size)
@@ -190,7 +266,9 @@ int main(int argc, char *argv[])
 	fd = vm_create_guest_memfd(vm, total_size, 0);
 
 	test_file_read_write(fd);
-	test_mmap(fd, page_size);
+	test_mmap(fd, page_size, total_size);
+	test_mbind(fd, page_size, total_size);
+	test_numa_allocation(fd, page_size, total_size);
 	test_file_size(fd, page_size, total_size);
 	test_fallocate(fd, page_size, total_size);
 	test_invalid_punch_hole(fd, page_size, total_size);
-- 
2.34.1
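
For readers unfamiliar with the mempolicy syscalls, the program below is a
standalone sketch (not part of the patch) of the mbind()/get_mempolicy()
pattern that test_mbind() exercises. It uses a plain memfd as a stand-in for
a guest_memfd, since creating the latter requires a KVM VM, and assumes NUMA
node 0 is online and a glibc that provides memfd_create(); it only prints the
policy it reads back rather than asserting on it.

/*
 * Illustrative only: bind a mapping to node 0 with mbind(), then read
 * the per-address policy back with get_mempolicy(MPOL_F_ADDR).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/mempolicy.h>

int main(void)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	unsigned long nodemask = 1;	/* node 0 */
	unsigned long maxnode = 8;
	unsigned long got_mask = 0;
	int got_policy = -1;
	void *mem;
	int fd;

	/* Plain memfd as a stand-in for a guest_memfd. */
	fd = memfd_create("mbind-sketch", 0);
	if (fd < 0 || ftruncate(fd, page_size)) {
		perror("memfd setup");
		return EXIT_FAILURE;
	}

	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Bind the mapping to node 0, then read the policy back. */
	if (syscall(__NR_mbind, mem, page_size, MPOL_BIND, &nodemask, maxnode, 0))
		perror("mbind");
	if (syscall(__NR_get_mempolicy, &got_policy, &got_mask, maxnode, mem,
		    MPOL_F_ADDR))
		perror("get_mempolicy");

	printf("policy=%d (MPOL_BIND=%d), nodemask=0x%lx\n",
	       got_policy, MPOL_BIND, got_mask);

	munmap(mem, page_size);
	close(fd);
	return EXIT_SUCCESS;
}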