The quilt patch titled
     Subject: kasan: add return value for kasan_mempool_poison_object
has been removed from the -mm tree.  Its filename was
     kasan-add-return-value-for-kasan_mempool_poison_object.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Subject: kasan: add return value for kasan_mempool_poison_object
Date: Tue, 19 Dec 2023 23:28:48 +0100

Add a return value for kasan_mempool_poison_object that lets the caller
know whether the allocation is affected by a double-free or an
invalid-free bug.  The caller can use this return value to stop
operating on the object.

Also introduce a check_page_allocation helper function to improve the
code readability.

Link: https://lkml.kernel.org/r/618af65273875fb9f56954285443279b15f1fcd9.1703024586.git.andreyknvl@xxxxxxxxxx
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: Alexander Lobakin <alobakin@xxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Breno Leitao <leitao@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Evgenii Stepanov <eugenis@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/kasan.h |   17 ++++++++++++-----
 mm/kasan/common.c     |   21 ++++++++++-----------
 2 files changed, 22 insertions(+), 16 deletions(-)

--- a/include/linux/kasan.h~kasan-add-return-value-for-kasan_mempool_poison_object
+++ a/include/linux/kasan.h
@@ -212,7 +212,7 @@ static __always_inline void * __must_che
 	return (void *)object;
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
  * @ptr: Pointer to the slab allocation.
@@ -225,16 +225,20 @@ void __kasan_mempool_poison_object(void
  * without putting it into the quarantine (for the Generic mode).
  *
  * This function also performs checks to detect double-free and invalid-free
- * bugs and reports them.
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
  *
  * This function operates on all slab allocations including large kmalloc
  * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
  */
-static __always_inline void kasan_mempool_poison_object(void *ptr)
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
 {
 	if (kasan_enabled())
-		__kasan_mempool_poison_object(ptr, _RET_IP_);
+		return __kasan_mempool_poison_object(ptr, _RET_IP_);
+	return true;
 }
 
 /*
@@ -293,7 +297,10 @@ static inline void *kasan_krealloc(const
 {
 	return (void *)object;
 }
-static inline void kasan_mempool_poison_object(void *ptr) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+	return true;
+}
 static inline bool kasan_check_byte(const void *address)
 {
 	return true;
--- a/mm/kasan/common.c~kasan-add-return-value-for-kasan_mempool_poison_object
+++ a/mm/kasan/common.c
@@ -254,7 +254,7 @@ bool __kasan_slab_free(struct kmem_cache
 	return ____kasan_slab_free(cache, object, ip, true, init);
 }
 
-static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
 	if (!kasan_arch_is_ready())
 		return false;
@@ -269,17 +269,14 @@ static inline bool ____kasan_kfree_large
 		return true;
 	}
 
-	/*
-	 * The object will be poisoned by kasan_poison_pages() or
-	 * kasan_mempool_poison_object().
-	 */
-
 	return false;
 }
 
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	____kasan_kfree_large(ptr, ip);
+	check_page_allocation(ptr, ip);
+
+	/* The object will be poisoned by kasan_poison_pages(). */
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
@@ -429,7 +426,7 @@ void * __must_check __kasan_krealloc(con
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
-void __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;
 
@@ -442,13 +439,15 @@ void __kasan_mempool_poison_object(void
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
 	if (unlikely(!folio_test_slab(folio))) {
-		if (____kasan_kfree_large(ptr, ip))
-			return;
+		if (check_page_allocation(ptr, ip))
+			return false;
 		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+		return true;
 	} else {
 		struct slab *slab = folio_slab(folio);
 
-		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
+		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
+						false, false);
 	}
 }
_

Patches currently in -mm which might be from andreyknvl@xxxxxxxxxx are

kasan-stop-leaking-stack-trace-handles.patch
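
For readers who want to adopt the new API, here is a minimal sketch of a
caller acting on the return value.  It is not part of the patch above, and
the pool is hypothetical: example_pool, its elements[] array, and
example_pool_free() are made-up names for illustration.

#include <linux/kasan.h>

#define EXAMPLE_POOL_SIZE 16

struct example_pool {
	void *elements[EXAMPLE_POOL_SIZE];	/* hypothetical element stash */
	int curr_nr;				/* number of cached elements */
};

static void example_pool_free(struct example_pool *pool, void *element)
{
	/*
	 * Poison the element and run KASAN's double-free and invalid-free
	 * checks.  A false return value means a bug was detected and
	 * already reported, so the element must not be recycled.
	 */
	if (!kasan_mempool_poison_object(element))
		return;

	/*
	 * Keep only the pointer.  The object's memory is poisoned now, so
	 * the pool must not write into it until it is unpoisoned again on
	 * the allocation path.  (Handling a full pool is omitted here.)
	 */
	if (pool->curr_nr < EXAMPLE_POOL_SIZE)
		pool->elements[pool->curr_nr++] = element;
}

Note that nothing touches the element after the poison call: bookkeeping
has to live outside the object, which is also why the sketch stashes bare
pointers in an array rather than, say, linking a list_head through the
freed memory.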