On 4/16/19 4:22 PM, Qian Cai wrote: > store_stackinfo() does not seem used in actual SLAB debugging. > Potentially, it could be added to check_poison_obj() to provide more > information, but this seems like an overkill due to the declining > popularity of the SLAB, so just remove it instead. > > Signed-off-by: Qian Cai <cai@xxxxxx> I've already acked Thomas' version, which was narrower, but I have no objection to removing more on top of that. Linus (and I, later in another thread) already pointed out /proc/slab_allocators. A quick look at add_caller() there is enough to show that removing it is no loss. > --- > mm/slab.c | 48 ++++++------------------------------------------ > 1 file changed, 6 insertions(+), 42 deletions(-) > > diff --git a/mm/slab.c b/mm/slab.c > index 3e1b7ff0360c..20f318f4f56e 100644 > --- a/mm/slab.c > +++ b/mm/slab.c > @@ -1467,53 +1467,17 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) > } > > #ifdef CONFIG_DEBUG_PAGEALLOC > -static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, > - unsigned long caller) > -{ > - int size = cachep->object_size; > - > - addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; > - > - if (size < 5 * sizeof(unsigned long)) > - return; > - > - *addr++ = 0x12345678; > - *addr++ = caller; > - *addr++ = smp_processor_id(); > - size -= 3 * sizeof(unsigned long); > - { > - unsigned long *sptr = &caller; > - unsigned long svalue; > - > - while (!kstack_end(sptr)) { > - svalue = *sptr++; > - if (kernel_text_address(svalue)) { > - *addr++ = svalue; > - size -= sizeof(unsigned long); > - if (size <= sizeof(unsigned long)) > - break; > - } > - } > - > - } > - *addr++ = 0x87654321; > -} > - > -static void slab_kernel_map(struct kmem_cache *cachep, void *objp, > - int map, unsigned long caller) > +static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) > { > if (!is_debug_pagealloc_cache(cachep)) > return; > > - if (caller) > - store_stackinfo(cachep, objp, caller); > - > 
kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); > } > > #else > static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, > - int map, unsigned long caller) {} > + int map) {} > > #endif > > @@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, > > if (cachep->flags & SLAB_POISON) { > check_poison_obj(cachep, objp); > - slab_kernel_map(cachep, objp, 1, 0); > + slab_kernel_map(cachep, objp, 1); > } > if (cachep->flags & SLAB_RED_ZONE) { > if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) > @@ -2433,7 +2397,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) > /* need to poison the objs? */ > if (cachep->flags & SLAB_POISON) { > poison_obj(cachep, objp, POISON_FREE); > - slab_kernel_map(cachep, objp, 0, 0); > + slab_kernel_map(cachep, objp, 0); > } > } > #endif > @@ -2812,7 +2776,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, > > if (cachep->flags & SLAB_POISON) { > poison_obj(cachep, objp, POISON_FREE); > - slab_kernel_map(cachep, objp, 0, caller); > + slab_kernel_map(cachep, objp, 0); > } > return objp; > } > @@ -3076,7 +3040,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, > return objp; > if (cachep->flags & SLAB_POISON) { > check_poison_obj(cachep, objp); > - slab_kernel_map(cachep, objp, 1, 0); > + slab_kernel_map(cachep, objp, 1); > poison_obj(cachep, objp, POISON_INUSE); > } > if (cachep->flags & SLAB_STORE_USER) >