For tests that need to allocate using vm_mmap() (e.g. usercopy and
execve), provide an interface to have the allocation tracked by KUnit
itself. This requires bringing up a placeholder userspace mm.

This combines my earlier attempt at this with Mark Rutland's version[1].

Normally mm_alloc() and arch_pick_mmap_layout() aren't exported for
modules, so export these only for KUnit testing.

Link: https://lore.kernel.org/lkml/20230321122514.1743889-2-mark.rutland@xxxxxxx/ [1]
Co-developed-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Reviewed-by: David Gow <davidgow@xxxxxxxxxx>
Signed-off-by: Kees Cook <kees@xxxxxxxxxx>
---
 include/kunit/test.h   |  17 +++++++
 kernel/fork.c          |   3 ++
 lib/kunit/Makefile     |   1 +
 lib/kunit/user_alloc.c | 113 +++++++++++++++++++++++++++++++++++++++++
 mm/util.c              |   3 ++
 5 files changed, 137 insertions(+)
 create mode 100644 lib/kunit/user_alloc.c

diff --git a/include/kunit/test.h b/include/kunit/test.h
index e32b4cb7afa2..ec61cad6b71d 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -480,6 +480,23 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp
 	return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
 }
 
+/**
+ * kunit_vm_mmap() - Allocate KUnit-tracked vm_mmap() area
+ * @test: The test context object.
+ * @file: struct file pointer to map from, if any
+ * @addr: desired address, if any
+ * @len: how many bytes to allocate
+ * @prot: mmap PROT_* bits
+ * @flag: mmap flags
+ * @offset: offset into @file to start mapping from.
+ *
+ * See vm_mmap() for more information.
+ */
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+			    unsigned long addr, unsigned long len,
+			    unsigned long prot, unsigned long flag,
+			    unsigned long offset);
+
 void kunit_cleanup(struct kunit *test);
 
 void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
diff --git a/kernel/fork.c b/kernel/fork.c
index 99076dbe27d8..cea203197136 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -115,6 +115,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/task.h>
 
+#include <kunit/visibility.h>
+
 /*
  * Minimum number of threads to boot the kernel
  */
@@ -1334,6 +1336,7 @@ struct mm_struct *mm_alloc(void)
 	memset(mm, 0, sizeof(*mm));
 	return mm_init(mm, current, current_user_ns());
 }
+EXPORT_SYMBOL_IF_KUNIT(mm_alloc);
 
 static inline void __mmput(struct mm_struct *mm)
 {
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 309659a32a78..56dd67dc6e57 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_KUNIT) +=			kunit.o
 
 kunit-objs +=				test.o \
 					resource.o \
+					user_alloc.o \
 					static_stub.o \
 					string-stream.o \
 					assert.o \
diff --git a/lib/kunit/user_alloc.c b/lib/kunit/user_alloc.c
new file mode 100644
index 000000000000..76d3d1345ed7
--- /dev/null
+++ b/lib/kunit/user_alloc.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit userspace memory allocation resource management.
+ */
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+
+struct kunit_vm_mmap_resource {
+	unsigned long addr;
+	size_t size;
+};
+
+/* vm_mmap() arguments */
+struct kunit_vm_mmap_params {
+	struct file *file;
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flag;
+	unsigned long offset;
+};
+
+/* Create and attach a new mm if it doesn't already exist. */
+static int kunit_attach_mm(void)
+{
+	struct mm_struct *mm;
+
+	if (current->mm)
+		return 0;
+
+	mm = mm_alloc();
+	if (!mm)
+		return -ENOMEM;
+
+	/* Define the task size. */
+	mm->task_size = TASK_SIZE;
+
+	/* Make sure we can allocate new VMAs. */
+	arch_pick_mmap_layout(mm, &current->signal->rlim[RLIMIT_STACK]);
+
+	/* Attach the mm. It will be cleaned up when the process dies. */
+	kthread_use_mm(mm);
+
+	return 0;
+}
+
+static int kunit_vm_mmap_init(struct kunit_resource *res, void *context)
+{
+	struct kunit_vm_mmap_params *p = context;
+	struct kunit_vm_mmap_resource vres;
+	int ret;
+
+	ret = kunit_attach_mm();
+	if (ret)
+		return ret;
+
+	vres.size = p->len;
+	vres.addr = vm_mmap(p->file, p->addr, p->len, p->prot, p->flag, p->offset);
+	if (!vres.addr)
+		return -ENOMEM;
+	res->data = kmemdup(&vres, sizeof(vres), GFP_KERNEL);
+	if (!res->data) {
+		vm_munmap(vres.addr, vres.size);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void kunit_vm_mmap_free(struct kunit_resource *res)
+{
+	struct kunit_vm_mmap_resource *vres = res->data;
+
+	/*
+	 * Since this is executed from the test monitoring process,
+	 * the test's mm has already been torn down. We don't need
+	 * to run vm_munmap(vres->addr, vres->size), only clean up
+	 * the vres.
+	 */
+
+	kfree(vres);
+	res->data = NULL;
+}
+
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+			    unsigned long addr, unsigned long len,
+			    unsigned long prot, unsigned long flag,
+			    unsigned long offset)
+{
+	struct kunit_vm_mmap_params params = {
+		.file = file,
+		.addr = addr,
+		.len = len,
+		.prot = prot,
+		.flag = flag,
+		.offset = offset,
+	};
+	struct kunit_vm_mmap_resource *vres;
+
+	vres = kunit_alloc_resource(test,
+				    kunit_vm_mmap_init,
+				    kunit_vm_mmap_free,
+				    GFP_KERNEL,
+				    &params);
+	if (vres)
+		return vres->addr;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_vm_mmap);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/mm/util.c b/mm/util.c
index c9e519e6811f..df37c47d9374 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -26,6 +26,8 @@
 
 #include <linux/uaccess.h>
 
+#include <kunit/visibility.h>
+
 #include "internal.h"
 #include "swap.h"
 
@@ -482,6 +484,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 	clear_bit(MMF_TOPDOWN, &mm->flags);
 }
 #endif
+EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
 
 /**
  * __account_locked_vm - account locked pages to an mm's locked_vm
-- 
2.34.1
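
As an illustration of the intended usage (not part of this patch), here is a
minimal sketch of a KUnit test case built on the new helper; the test name,
mapping size, and protection/flag choices below are hypothetical:

	/* Hypothetical example: map two anonymous pages tied to the test's lifetime. */
	static void example_mmap_test(struct kunit *test)
	{
		unsigned long user_addr;

		user_addr = kunit_vm_mmap(test, NULL, 0, 2 * PAGE_SIZE,
					  PROT_READ | PROT_WRITE,
					  MAP_ANONYMOUS | MAP_PRIVATE, 0);
		KUNIT_ASSERT_NE_MSG(test, user_addr, 0,
				    "Could not create userspace mapping");

		/* ... exercise copy_to_user()/copy_from_user() against user_addr ... */

		/*
		 * No explicit vm_munmap() is needed: the mapping goes away when
		 * the test's placeholder mm is torn down.
		 */
	}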