The quilt patch titled
     Subject: mempool: do not use ksize() for poisoning
has been removed from the -mm tree.  Its filename was
     mempool-do-not-use-ksize-for-poisoning.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Kees Cook <keescook@xxxxxxxxxxxx>
Subject: mempool: do not use ksize() for poisoning
Date: Fri, 28 Oct 2022 08:53:01 -0700

Nothing appears to be using ksize() within the kmalloc-backed mempools
except the mempool poisoning logic.  Use the actual pool size instead of
ksize() to avoid needing any special handling of the memory required by
KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.

[vbabka@xxxxxxx: for slab mempools pool_data is not object size]
Link: https://lkml.kernel.org/r/13c4bd6e-09d3-efce-43a5-5a99be8bc96b@xxxxxxx
Link: https://lkml.kernel.org/r/20221028154823.you.615-kees@xxxxxxxxxx
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@xxxxxxx/
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Reviewed-by: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Reported-by: Anders Roxell <anders.roxell@xxxxxxxxxx>
Link: https://lore.kernel.org/all/20221031105514.GB69385@mutt/
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/mempool.c |   18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

--- a/mm/mempool.c~mempool-do-not-use-ksize-for-poisoning
+++ a/mm/mempool.c
@@ -57,8 +57,10 @@ static void __check_element(mempool_t *p
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
-		__check_element(pool, element, ksize(element));
+	if (pool->free == mempool_kfree) {
+		__check_element(pool, element, (size_t)pool->pool_data);
+	} else if (pool->free == mempool_free_slab) {
+		__check_element(pool, element, kmem_cache_size(pool->pool_data));
 	} else if (pool->free == mempool_free_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -80,8 +82,10 @@ static void __poison_element(void *eleme
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
-		__poison_element(element, ksize(element));
+	if (pool->alloc == mempool_kmalloc) {
+		__poison_element(element, (size_t)pool->pool_data);
+	} else if (pool->alloc == mempool_alloc_slab) {
+		__poison_element(element, kmem_cache_size(pool->pool_data));
 	} else if (pool->alloc == mempool_alloc_pages) {
 		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
@@ -111,8 +115,10 @@ static __always_inline void kasan_poison
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_unpoison_range(element, __ksize(element));
+	if (pool->alloc == mempool_kmalloc)
+		kasan_unpoison_range(element, (size_t)pool->pool_data);
+	else if (pool->alloc == mempool_alloc_slab)
+		kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_unpoison_pages(element, (unsigned long)pool->pool_data, false);
 }
_

Patches currently in -mm which might be from keescook@xxxxxxxxxxxx are
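
As an aside on the fix above: the two slab-backed pool flavors store
different things in pool_data, which is exactly what the patch exploits.
Below is a minimal, illustrative sketch of setting up both flavors,
assuming a kernel-module context.  The mempool_create() /
kmem_cache_create() calls are the stock kernel API; the names and size
(mempool_demo, ELEM_SIZE) are made up for illustration, not taken from
the patch.

	#include <linux/module.h>
	#include <linux/mempool.h>
	#include <linux/slab.h>

	#define ELEM_SIZE 128	/* illustrative element size */

	static mempool_t *kmalloc_pool;
	static mempool_t *slab_pool;
	static struct kmem_cache *elem_cache;

	static int __init mempool_demo_init(void)
	{
		/*
		 * kmalloc-backed pool: pool_data is the element size
		 * itself, smuggled through the void * argument.  This is
		 * why the patch can recover the allocation size with
		 * (size_t)pool->pool_data instead of calling ksize().
		 */
		kmalloc_pool = mempool_create(16, mempool_kmalloc,
					      mempool_kfree,
					      (void *)(unsigned long)ELEM_SIZE);

		/*
		 * kmem_cache-backed pool: pool_data is the cache pointer,
		 * not a size, so the object size must be fetched with
		 * kmem_cache_size() -- the detail Vlastimil's fixup
		 * ("for slab mempools pool_data is not object size") covers.
		 */
		elem_cache = kmem_cache_create("mempool_demo", ELEM_SIZE,
					       0, 0, NULL);
		slab_pool = elem_cache ?
			mempool_create(16, mempool_alloc_slab,
				       mempool_free_slab, elem_cache) : NULL;

		if (!kmalloc_pool || !slab_pool) {
			mempool_destroy(slab_pool);	/* NULL-safe */
			kmem_cache_destroy(elem_cache);	/* NULL-safe */
			mempool_destroy(kmalloc_pool);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit mempool_demo_exit(void)
	{
		mempool_destroy(slab_pool);
		kmem_cache_destroy(elem_cache);
		mempool_destroy(kmalloc_pool);
	}

	module_init(mempool_demo_init);
	module_exit(mempool_demo_exit);
	MODULE_LICENSE("GPL");

In practice the convenience wrappers mempool_create_kmalloc_pool() and
mempool_create_slab_pool() set up the same alloc/free pairs; the
open-coded form above just makes the pool_data convention visible, and
with it why the patched poisoning code must branch on mempool_kmalloc
vs. mempool_alloc_slab rather than treating pool_data uniformly.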