To defang a subset of "dangling pointer" use-after-free flaws[1], take the address of any lvalues passed to kfree() and set them to NULL after freeing. To do this manually, kfree_and_null() (and the "sensitive" variant) are introduced. Link: https://github.com/KSPP/linux/issues/87 [1] Signed-off-by: Kees Cook <kees@xxxxxxxxxx> --- Cc: Vlastimil Babka <vbabka@xxxxxxx> Cc: Christoph Lameter <cl@xxxxxxxxx> Cc: Pekka Enberg <penberg@xxxxxxxxxx> Cc: David Rientjes <rientjes@xxxxxxxxxx> Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> Cc: Roman Gushchin <roman.gushchin@xxxxxxxxx> Cc: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx> Cc: linux-mm@xxxxxxxxx --- include/linux/slab.h | 30 +++++++++++++++++++++++++++--- mm/slab_common.c | 8 ++++---- mm/slub.c | 6 +++--- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 3e807ccc8583..2717ad238fa2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -465,11 +465,35 @@ void * __must_check krealloc_noprof(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2); #define krealloc(...) 
	alloc_hooks(krealloc_noprof(__VA_ARGS__)) -void kfree(const void *objp); -void kfree_sensitive(const void *objp); +void __kfree(const void *objp); +void __kfree_sensitive(const void *objp); size_t __ksize(const void *objp); -#define __kfree(x) kfree(x) +static inline void kfree_and_null(void **ptr) +{ + __kfree(*ptr); + *ptr = NULL; +} +static inline void kfree_sensitive_and_null(void **ptr) +{ + __kfree_sensitive(*ptr); + *ptr = NULL; +} + +#define __force_lvalue_expr(x) \ + __builtin_choose_expr(__is_lvalue(x), x, (void *){ NULL }) + +#define __free_and_null(__how, x) \ +({ \ + typeof(x) *__ptr = &(x); \ + __how ## _and_null((void **)__ptr); \ +}) +#define __free_and_maybe_null(__how, x) \ + __builtin_choose_expr(__is_lvalue(x), \ + __free_and_null(__how, __force_lvalue_expr(x)), \ + __ ## __how(x)) +#define kfree(x) __free_and_maybe_null(kfree, x) +#define kfree_sensitive(x) __free_and_maybe_null(kfree_sensitive, x) DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) diff --git a/mm/slab_common.c b/mm/slab_common.c index 4030907b6b7d..9a82952ec266 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1211,7 +1211,7 @@ module_init(slab_proc_init); #endif /* CONFIG_SLUB_DEBUG */ /** - * kfree_sensitive - Clear sensitive information in memory before freeing + * __kfree_sensitive - Clear sensitive information in memory before freeing * @p: object to free memory of * * The memory of the object @p points to is zeroed before freed. @@ -1221,7 +1221,7 @@ module_init(slab_proc_init); * deal bigger than the requested buffer size passed to kmalloc(). So be * careful when using this function in performance sensitive code. 
*/ -void kfree_sensitive(const void *p) +void __kfree_sensitive(const void *p) { size_t ks; void *mem = (void *)p; @@ -1231,9 +1231,9 @@ void kfree_sensitive(const void *p) kasan_unpoison_range(mem, ks); memzero_explicit(mem, ks); } - kfree(mem); + __kfree(mem); } -EXPORT_SYMBOL(kfree_sensitive); +EXPORT_SYMBOL(__kfree_sensitive); size_t ksize(const void *objp) { diff --git a/mm/slub.c b/mm/slub.c index 1f50129dcfb3..38dd898667bf 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4729,12 +4729,12 @@ static void free_large_kmalloc(struct folio *folio, void *object) } /** - * kfree - free previously allocated memory + * __kfree - free previously allocated memory * @object: pointer returned by kmalloc() or kmem_cache_alloc() * * If @object is NULL, no operation is performed. */ -void kfree(const void *object) +void __kfree(const void *object) { struct folio *folio; struct slab *slab; @@ -4756,7 +4756,7 @@ void kfree(const void *object) s = slab->slab_cache; slab_free(s, slab, x, _RET_IP_); } -EXPORT_SYMBOL(kfree); +EXPORT_SYMBOL(__kfree); static __always_inline __realloc_size(2) void * __do_krealloc(const void *p, size_t new_size, gfp_t flags) -- 2.34.1