kmalloc will round up the requested size to a fixed size (mostly a power
of 2), so there can be extra space beyond what was requested, whose size
is the actual buffer size minus the original request size. To better
detect out-of-bounds access or abuse of this space, add a redzone sanity
check for it.

In the current kernel, some kmalloc users already know about the
existence of this space and utilize it after calling ksize() to get the
real size of the allocated buffer. So skip the sanity check for objects
on which ksize() has been called, treating them as legitimate users.

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Feng Tang <feng.tang@xxxxxxxxx>
---
 mm/slub.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 3 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 946919066a4b..added2653bb0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -836,6 +836,11 @@ static inline void set_orig_size(struct kmem_cache *s,
 	*(unsigned int *)p = orig_size;
 }
 
+static inline void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+	set_orig_size(s, (void *)object, s->object_size);
+}
+
 static unsigned int get_orig_size(struct kmem_cache *s, void *object)
 {
 	void *p = kasan_reset_tag(object);
@@ -967,13 +972,35 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
+	unsigned int orig_size = s->object_size;
 
-	if (s->flags & SLAB_RED_ZONE)
+	if (s->flags & SLAB_RED_ZONE) {
 		memset(p - s->red_left_pad, val, s->red_left_pad);
 
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			unsigned int zone_start;
+
+			orig_size = get_orig_size(s, object);
+			zone_start = orig_size;
+
+			if (!freeptr_outside_object(s))
+				zone_start = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+
+			/*
+			 * Redzone the extra space allocated by kmalloc
+			 * beyond the requested size.
+			 */
+			if (zone_start < s->object_size)
+				memset(p + zone_start, val,
+					s->object_size - zone_start);
+		}
+
+	}
+
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->object_size - 1);
-		p[s->object_size - 1] = POISON_END;
+		memset(p, POISON_FREE, orig_size - 1);
+		p[orig_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -1120,6 +1147,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 {
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
+	unsigned int orig_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1129,6 +1157,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			orig_size = get_orig_size(s, object);
+
+			if (!freeptr_outside_object(s))
+				orig_size = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+			if (s->object_size > orig_size &&
+				!check_bytes_and_report(s, slab, object,
+					"kmalloc Redzone", p + orig_size,
+					val, s->object_size - orig_size)) {
+				return 0;
+			}
+		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, slab, p, "Alignment padding",
@@ -4588,6 +4630,10 @@ size_t __ksize(const void *object)
 	if (unlikely(!folio_test_slab(folio)))
 		return folio_size(folio);
 
+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 EXPORT_SYMBOL(__ksize);
-- 
2.27.0
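For readers unfamiliar with the ksize() pattern the last hunk accommodates, here is a
minimal illustrative sketch (not part of the patch; the helper name alloc_and_use_slack()
and the sizes in the comments are made up). A caller that queries ksize() and then uses
the whole rounded-up buffer would otherwise trip the new "kmalloc Redzone" report, which
is why __ksize() calls skip_orig_size_check() to treat such objects as legitimate users:

/*
 * Illustrative sketch only -- not from this patch. A kmalloc user that
 * deliberately uses the slack space after querying ksize(). Without the
 * skip_orig_size_check() call in __ksize(), writes into the range
 * [request size, ksize(buf)) would be reported by the new redzone check.
 */
#include <linux/slab.h>
#include <linux/string.h>

static void *alloc_and_use_slack(size_t req)
{
	void *buf = kmalloc(req, GFP_KERNEL);	/* e.g. req = 70 lands in kmalloc-96 */

	if (!buf)
		return NULL;

	/* ksize() reports the usable size and, with this patch, exempts the object */
	memset(buf, 0, ksize(buf));

	return buf;
}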