On Fri, Sep 09, 2022 at 02:26:34PM +0800, Hyeonggon Yoo wrote: > On Wed, Sep 07, 2022 at 03:10:23PM +0800, Feng Tang wrote: > > kmalloc will round up the request size to a fixed size (mostly power > > of 2), so there could be an extra space beyond what is requested, whose > > size is the actual buffer size minus the original request size. > > > > To better detect out-of-bounds access or abuse of this space, add > > a redzone sanity check for it. > > > > And in the current kernel, some kmalloc users already know of the existence > > of the space and utilize it after calling 'ksize()' to know the real > > size of the allocated buffer. So we skip the sanity check for objects > > which have been called with ksize(), treating them as legitimate > > users. > > > > Suggested-by: Vlastimil Babka <vbabka@xxxxxxx> > > Signed-off-by: Feng Tang <feng.tang@xxxxxxxxx> > > --- [...] > > - if (s->flags & SLAB_RED_ZONE) > > + if (s->flags & SLAB_RED_ZONE) { > > memset(p - s->red_left_pad, val, s->red_left_pad); > > > > + if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { > > + unsigned int zone_start; > > + > > + orig_size = get_orig_size(s, object); > > + zone_start = orig_size; > > + > > + if (!freeptr_outside_object(s)) > > + zone_start = max_t(unsigned int, orig_size, > > + s->offset + sizeof(void *)); > > + > > + /* > > + * Redzone the extra allocated space by kmalloc > > + * than requested. 
> > + */ > > + if (zone_start < s->object_size) > > + memset(p + zone_start, val, > > + s->object_size - zone_start); > > + } > > + } > > + > > if (s->flags & __OBJECT_POISON) { > > - memset(p, POISON_FREE, s->object_size - 1); > > - p[s->object_size - 1] = POISON_END; > > + memset(p, POISON_FREE, orig_size - 1); > > + p[orig_size - 1] = POISON_END; > > } > > > > if (s->flags & SLAB_RED_ZONE) > > @@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab, > > { > > u8 *p = object; > > u8 *endobject = object + s->object_size; > > + unsigned int orig_size; > > > > if (s->flags & SLAB_RED_ZONE) { > > if (!check_bytes_and_report(s, slab, object, "Left Redzone", > > @@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab, > > if (!check_bytes_and_report(s, slab, object, "Right Redzone", > > endobject, val, s->inuse - s->object_size)) > > return 0; > > + > > + if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { > > + orig_size = get_orig_size(s, object); > > + > > + if (!freeptr_outside_object(s)) > > + orig_size = max_t(unsigned int, orig_size, > > + s->offset + sizeof(void *)); > > + if (s->object_size > orig_size && > > + !check_bytes_and_report(s, slab, object, > > + "kmalloc Redzone", p + orig_size, > > + val, s->object_size - orig_size)) { > > + return 0; > > + } > > + } > > } else { > > if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { > > check_bytes_and_report(s, slab, p, "Alignment padding", > > -- > > 2.34.1 > > > > Looks good, but what about putting > free pointer outside object when slub_debug_orig_size(s)? Sounds good to me. This makes all kmalloc slabs covered by redzone check. I just gave the code a shot and it just works with my test case! Thanks! 
- Feng > diff --git a/mm/slub.c b/mm/slub.c > index 9d1a985c9ede..7e57d9f718d1 100644 > --- a/mm/slub.c > +++ b/mm/slub.c > @@ -970,22 +970,15 @@ static void init_object(struct kmem_cache *s, void *object, u8 val) > memset(p - s->red_left_pad, val, s->red_left_pad); > > if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { > - unsigned int zone_start; > - > orig_size = get_orig_size(s, object); > - zone_start = orig_size; > - > - if (!freeptr_outside_object(s)) > - zone_start = max_t(unsigned int, orig_size, > - s->offset + sizeof(void *)); > > /* > * Redzone the extra allocated space by kmalloc > * than requested. > */ > - if (zone_start < s->object_size) > - memset(p + zone_start, val, > - s->object_size - zone_start); > + if (orig_size < s->object_size) > + memset(p + orig_size, val, > + s->object_size - orig_size); > } > } > > @@ -1153,9 +1146,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab, > if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { > orig_size = get_orig_size(s, object); > > - if (!freeptr_outside_object(s)) > - orig_size = max_t(unsigned int, orig_size, > - s->offset + sizeof(void *)); > if (s->object_size > orig_size && > !check_bytes_and_report(s, slab, object, > "kmalloc Redzone", p + orig_size, > @@ -4234,7 +4224,8 @@ static int calculate_sizes(struct kmem_cache *s) > */ > s->inuse = size; > > - if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || > + if (slub_debug_orig_size(s) || > + (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || > ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || > s->ctor) { > /* > > -- > Thanks, > Hyeonggon >