The patch titled
     Subject: mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
has been added to the -mm mm-hotfixes-unstable branch.  Its filename is
     mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush.patch

This patch will later appear in the mm-hotfixes-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Alexander Potapenko <glider@xxxxxxxxxx>
Subject: mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
Date: Thu, 13 Apr 2023 15:12:20 +0200

As reported by Dipanjan Das, when KMSAN is used together with kernel fault
injection (or, generally, even without the latter), calls to kcalloc() or
__vmap_pages_range_noflush() may fail, leaving the metadata mappings for
the virtual mapping in an inconsistent state.  When these metadata
mappings are accessed later, the kernel crashes.

To address the problem, we return a non-zero error code from
kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping
failure inside it, and make vmap_pages_range_noflush() return an error if
KMSAN fails to allocate the metadata.

This patch also removes KMSAN_WARN_ON() from
kmsan_vmap_pages_range_noflush(), as these allocation failures are not
fatal anymore.

Link: https://lkml.kernel.org/r/20230413131223.4135168-1-glider@xxxxxxxxxx
Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
Reported-by: Dipanjan Das <mail.dipanjan.das@xxxxxxxxx>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@xxxxxxxxxxxxxx/
Cc: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kmsan.h |   20 +++++++++++---------
 mm/kmsan/shadow.c     |   27 ++++++++++++++++++---------
 mm/vmalloc.c          |    6 +++++-
 3 files changed, 34 insertions(+), 19 deletions(-)

--- a/include/linux/kmsan.h~mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush
+++ a/include/linux/kmsan.h
@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift:	page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
 */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift);
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -281,12 +282,13 @@ static inline void kmsan_kfree_large(con
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
-						  unsigned long end,
-						  pgprot_t prot,
-						  struct page **pages,
-						  unsigned int page_shift)
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
+						 unsigned long end,
+						 pgprot_t prot,
+						 struct page **pages,
+						 unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
--- a/mm/kmsan/shadow.c~mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush
+++ a/mm/kmsan/shadow.c
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page,
 	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift)
 {
 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
 	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
 	if (!kmsan_enabled)
-		return;
+		return 0;
 
 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
 	if (!shadow_start)
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
 		goto ret;
+	}
 	for (int i = 0; i < nr; i++) {
 		s_pages[i] = shadow_page_for(pages[i]);
 		o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsi
 	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
 					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
 					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	kmsan_leave_runtime();
 	flush_tlb_kernel_range(shadow_start, shadow_end);
 	flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsi
 ret:
 	kfree(s_pages);
 	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
--- a/mm/vmalloc.c~mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush
+++ a/mm/vmalloc.c
@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+						 page_shift);
+
+	if (ret)
+		return ret;
 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
 
_

Patches currently in -mm which might be from glider@xxxxxxxxxx are

mm-kmsan-handle-alloc-failures-in-kmsan_vmap_pages_range_noflush.patch
mm-kmsan-handle-alloc-failures-in-kmsan_ioremap_page_range.patch
mm-kmsan-apply-__must_check-to-non-void-functions.patch
mm-apply-__must_check-to-vmap_pages_range_noflush.patch