Dave Anderson <anderson@xxxxxxxxxx> writes:

> Right, but the question is whether it makes more sense to display the
> full slab object address containing the red-zone buffer or not. When
> debugging slab corruption for example, I would argue it's more relevant
> to show the full object address. For example, if you want to follow
> that linked list from kmem_cache_cpu.freelist pointer, if I'm not mistaken
> the next pointer in each object will be located in the first word of the
> object. Anyway, that is more in line with the original intent of kmem -S
> command, even in the case when slub debug is turned on.
>
> On the other hand, in your example above, you are presuming the address
> shown should be what the kmalloc() or kmem_cache_alloc() caller receives,
> which I agree has merit. But consider that you can enter any address
> within an object, and get the same result.
>
> I wonder whether there can be a compromise, such that in the case of slub
> debug, there could be extra information in the output display that indicates
> that the object contains a red zone buffer?

I see. Then how about this? It adds the additional information "(data %lx)".

With a left red zone (object addr < data addr):

crash> kmem ffffea0004de2c00
CACHE            NAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE
ffff88013a719900 ext4_inode_cache        2200        730       742     53    32k
  SLAB              MEMORY            NODE  TOTAL  ALLOCATED  FREE
  ffffea0004de2c00  ffff8801378b0000     0     14          2    12
  FREE / [ALLOCATED]
   ffff8801378b0000  (data ffff8801378b0008, cpu 0 cache)
  [ffff8801378b08b8]  (data ffff8801378b08c0)

Without a left red zone (object addr == data addr):

crash> kmem ffffea0004e5be00
CACHE            NAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE
ffff88013a6f2f00 ext4_inode_cache        2200        730       742     53    32k
  SLAB              MEMORY            NODE  TOTAL  ALLOCATED  FREE
  ffffea0004e5be00  ffff8801396f8000     0     14          2    12
  FREE / [ALLOCATED]
   ffff8801396f8000  (cpu 0 cache)
  [ffff8801396f88a0]

Thanks.
---

Fix "kmem <addr>" for kernels configured with CONFIG_SLUB and
SLAB_RED_ZONE. If SLAB_RED_ZONE is enabled, SLUB adds a guard zone of
sizeof(void *) bytes (red_left_pad) to the left of each object on v4.6
or later kernels.
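As an illustration only (not part of the patch), the following minimal
sketch shows the address relationship described above: object_to_data()
is a hypothetical helper, SLAB_RED_ZONE reuses the value from the hunk
further down, and red_left_pad is assumed to equal sizeof(void *) as
stated above.

#include <stdio.h>

#define SLAB_RED_ZONE 0x00000400UL      /* same value the hunk below defines */

/*
 * Hypothetical helper (illustration only): given the slab object start
 * that "kmem -S" reports, return the address the kmalloc() /
 * kmem_cache_alloc() caller received.  With SLAB_RED_ZONE set, SLUB
 * (v4.6+) places red_left_pad bytes of red zone in front of the object,
 * so the data begins red_left_pad bytes after the object start;
 * otherwise the two addresses coincide.
 */
static unsigned long object_to_data(unsigned long object, unsigned long flags,
                                    unsigned long red_left_pad)
{
        return (flags & SLAB_RED_ZONE) ? object + red_left_pad : object;
}

int main(void)
{
        /* Object address taken from the ext4_inode_cache example above. */
        unsigned long object = 0xffff8801378b0000UL;

        printf("object %lx -> data %lx\n", object,
               object_to_data(object, SLAB_RED_ZONE, sizeof(void *)));
        return 0;
}

Assuming red_left_pad == sizeof(void *) == 8, as in the 64-bit example
above, this prints "object ffff8801378b0000 -> data ffff8801378b0008",
which matches the "(data ...)" annotation in the new output.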
Without this fix, the SUPERBLK address shown by "mount" and the
[allocated] object address shown by "kmem <addr>" differ, as in the
following:

crash> mount
     MOUNT           SUPERBLK     TYPE   DEVNAME   DIRNAME
ffff88013ae58040 ffff88013ac35698 rootfs rootfs    /
[...]

crash> kmem ffff88013ac35698
CACHE            NAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE
ffff88013ac05bc0 kmalloc-4096            4096        118       126     18    32k
  SLAB              MEMORY            NODE  TOTAL  ALLOCATED  FREE
  ffffea0004eb0c00  ffff88013ac30000     0      7          7     0
  FREE / [ALLOCATED]
  [ffff88013ac35690]
[...]

This left pad for SLAB_RED_ZONE may confuse the user, so this patch adds
the following additional information when data_start != object_start:

crash> kmem ffffea0004de2c00
CACHE            NAME                 OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE
ffff88013a719900 ext4_inode_cache        2200        730       742     53    32k
  SLAB              MEMORY            NODE  TOTAL  ALLOCATED  FREE
  ffffea0004de2c00  ffff8801378b0000     0     14          2    12
  FREE / [ALLOCATED]
   ffff8801378b0000  (data ffff8801378b0008, cpu 0 cache)
  [ffff8801378b08b8]  (data ffff8801378b08c0)

---
 defs.h    |  1 +
 memory.c  | 28 ++++++++++++++++++++++++----
 symbols.c |  2 ++
 3 files changed, 27 insertions(+), 4 deletions(-)

diff -puN memory.c~slub-red_zone-fix memory.c
--- crash-64/memory.c~slub-red_zone-fix 2017-02-02 13:01:54.263337316 +0900
+++ crash-64-hirofumi/memory.c  2017-02-03 04:48:24.692595383 +0900
@@ -723,6 +723,7 @@ vm_init(void)
        MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node");
        MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab");
        MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list");
+       MEMBER_OFFSET_INIT(kmem_cache_red_left_pad, "kmem_cache", "red_left_pad");
        MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name");
        MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags");
        MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist");
@@ -18354,7 +18355,7 @@ do_slab_slub(struct meminfo *si, int ver
        physaddr_t paddr;
        ulong vaddr;
        ushort inuse, objects;
-       ulong freelist, cpu_freelist, cpu_slab_ptr;
+       ulong freelist, cpu_freelist, cpu_slab_ptr, red_left_pad;
        int i, free_objects, cpu_slab, is_free, node;
        ulong p, q;

@@ -18442,6 +18443,13 @@ do_slab_slub(struct meminfo *si, int ver
                        fprintf(fp, "< SLUB: free list END (%d found) >\n", i);
        }

+       red_left_pad = 0;
+       if (VALID_MEMBER(kmem_cache_red_left_pad)) {
+#define SLAB_RED_ZONE          0x00000400UL
+               ulong flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags));
+               if (flags & SLAB_RED_ZONE)
+                       red_left_pad = ULONG(si->cache_buf + OFFSET(kmem_cache_red_left_pad));
+       }
        for (p = vaddr; p < vaddr + objects * si->size; p += si->size) {
                hq_open();
                is_free = FALSE;
@@ -18482,9 +18490,21 @@ do_slab_slub(struct meminfo *si, int ver

                fprintf(fp, "  %s%lx%s",
                        is_free ? " " : "[",
-                       p, is_free ? " " : "]");
-               if (is_free && (cpu_slab >= 0))
-                       fprintf(fp, "(cpu %d cache)", cpu_slab);
+                       p, is_free ? " " : "]");
+               if (red_left_pad || (is_free && (cpu_slab >= 0))) {
+                       int need_comma = 0;
+                       fprintf(fp, " (");
+                       if (red_left_pad) {
+                               fprintf(fp, "data %lx", p + red_left_pad);
+                               need_comma = 1;
+                       }
+                       if (is_free && (cpu_slab >= 0)) {
+                               if (need_comma)
+                                       fprintf(fp, ", ");
+                               fprintf(fp, "cpu %d cache", cpu_slab);
+                       }
+                       fprintf(fp, ")");
+               }
                fprintf(fp, "\n");
        }

diff -puN defs.h~slub-red_zone-fix defs.h
--- crash-64/defs.h~slub-red_zone-fix   2017-02-02 13:01:54.264337322 +0900
+++ crash-64-hirofumi/defs.h    2017-02-03 03:26:39.211648619 +0900
@@ -1696,6 +1696,7 @@ struct offset_table {
        long kmem_cache_align;
        long kmem_cache_name;
        long kmem_cache_list;
+       long kmem_cache_red_left_pad;
        long kmem_cache_node;
        long kmem_cache_cpu_slab;
        long page_inuse;
diff -puN symbols.c~slub-red_zone-fix symbols.c
--- crash-64/symbols.c~slub-red_zone-fix        2017-02-02 13:01:54.264337322 +0900
+++ crash-64-hirofumi/symbols.c 2017-02-03 03:26:39.213648633 +0900
@@ -9330,6 +9330,8 @@ dump_offset_table(char *spec, ulong make
                        OFFSET(kmem_cache_name));
        fprintf(fp, "           kmem_cache_list: %ld\n",
                        OFFSET(kmem_cache_list));
+       fprintf(fp, "   kmem_cache_red_left_pad: %ld\n",
+                       OFFSET(kmem_cache_red_left_pad));
        fprintf(fp, "           kmem_cache_node: %ld\n",
                        OFFSET(kmem_cache_node));
        fprintf(fp, "       kmem_cache_cpu_slab: %ld\n",
_
-- 
OGAWA Hirofumi <hirofumi@xxxxxxxxxxxxxxxxxx>

--
Crash-utility mailing list
Crash-utility@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/crash-utility