Sometimes it is desirable to override SLUB's debug facilities' default behavior upon stumbling on a cache or object error and just stop the execution in order to grab a coredump, at the error-spotting time, instead of trying to fix the issue and report in an attempt to keep the system rolling. This patch introduces a new debug flag SLAB_PANIC_ON_ERROR, along with its related SLUB-machinery, in order to extend current slub_debug facilities and provide the aforementioned behavior override. Signed-off-by: Rafael Aquini <aquini@xxxxxxxxxx> --- Documentation/vm/slub.rst | 2 ++ include/linux/slab.h | 2 ++ mm/slab.h | 3 ++- mm/slub.c | 44 ++++++++++++++++++++++++++++++--------- 4 files changed, 40 insertions(+), 11 deletions(-) diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst index 933ada4368ff..51b18c28ec78 100644 --- a/Documentation/vm/slub.rst +++ b/Documentation/vm/slub.rst @@ -54,6 +54,8 @@ Possible debug options are:: caused higher minimum slab orders - Switch all debugging off (useful if the kernel is configured with CONFIG_SLUB_DEBUG_ON) + C Toggle panic on error (crash) to allow for post-mortem + analysis of a coredump taken at the error-spotting time F.e. 
in order to boot just with sanity checks and red zoning one would specify:: diff --git a/include/linux/slab.h b/include/linux/slab.h index 6d454886bcaf..e3496ad7859f 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -25,6 +25,8 @@ */ /* DEBUG: Perform (expensive) checks on alloc/free */ #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U) +/* DEBUG: panic on error (forced crash) */ +#define SLAB_PANIC_ON_ERROR ((slab_flags_t __force)0x00000200U) /* DEBUG: Red zone objs in a cache */ #define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U) /* DEBUG: Poison objects */ diff --git a/mm/slab.h b/mm/slab.h index 207c83ef6e06..27116f8683a1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -198,7 +198,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size, #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) #elif defined(CONFIG_SLUB_DEBUG) #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_CONSISTENCY_CHECKS) + SLAB_TRACE | SLAB_CONSISTENCY_CHECKS | \ + SLAB_PANIC_ON_ERROR) #else #define SLAB_DEBUG_FLAGS (0) #endif diff --git a/mm/slub.c b/mm/slub.c index 9bf44955c4f1..8b4fc002b865 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -700,8 +700,6 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) /* Beginning of the filler is the free pointer */ print_section(KERN_ERR, "Padding ", p + off, size_from_object(s) - off); - - dump_stack(); } void object_err(struct kmem_cache *s, struct page *page, @@ -709,6 +707,9 @@ void object_err(struct kmem_cache *s, struct page *page, { slab_bug(s, "%s", reason); print_trailer(s, page, object); + if (unlikely(s->flags & SLAB_PANIC_ON_ERROR)) + panic("BUG: %s: %s", s->name, reason); + dump_stack(); } static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, @@ -722,6 +723,8 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, va_end(args); slab_bug(s, "%s", buf); 
print_page_info(page); + if (unlikely(s->flags & SLAB_PANIC_ON_ERROR)) + panic("BUG: %s: %s", s->name, buf); dump_stack(); } @@ -771,7 +774,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, fault, end - 1, fault - addr, fault[0], value); print_trailer(s, page, object); - + dump_stack(); restore_bytes(s, what, value, fault, end); return 0; } @@ -1173,13 +1176,14 @@ static inline int free_consistency_checks(struct kmem_cache *s, if (!PageSlab(page)) { slab_err(s, page, "Attempt to free object(0x%p) outside of slab", object); - } else if (!page->slab_cache) { - pr_err("SLUB <none>: no slab for object 0x%p.\n", - object); - dump_stack(); - } else - object_err(s, page, object, - "page slab pointer corrupt."); + } else { + char reason[80]; + + snprintf(reason, sizeof(reason), + "page slab pointer corruption: 0x%p (0x%p expected)", + page->slab_cache, s); + object_err(s, page, object, reason); + } return 0; } return 1; @@ -1291,6 +1295,9 @@ static int __init setup_slub_debug(char *str) */ disable_higher_order_debug = 1; break; + case 'c': + slub_debug |= SLAB_PANIC_ON_ERROR; + break; default: pr_err("slub_debug option '%c' unknown. 
skipped\n", *str); @@ -5312,6 +5319,22 @@ static ssize_t free_calls_show(struct kmem_cache *s, char *buf) return list_locations(s, buf, TRACK_FREE); } SLAB_ATTR_RO(free_calls); + +static ssize_t +panic_on_error_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%d\n", !!(s->flags & SLAB_PANIC_ON_ERROR)); +} + +static ssize_t +panic_on_error_store(struct kmem_cache *s, const char *buf, size_t length) +{ + s->flags &= ~SLAB_PANIC_ON_ERROR; + if (buf[0] == '1') + s->flags |= SLAB_PANIC_ON_ERROR; + return length; +} +SLAB_ATTR(panic_on_error); #endif /* CONFIG_SLUB_DEBUG */ #ifdef CONFIG_FAILSLAB @@ -5486,6 +5509,7 @@ static struct attribute *slab_attrs[] = { &validate_attr.attr, &alloc_calls_attr.attr, &free_calls_attr.attr, + &panic_on_error_attr.attr, #endif #ifdef CONFIG_ZONE_DMA &cache_dma_attr.attr, -- 2.25.4