There are a few places that call kmem_cache_debug(s) (which tests whether any
of the debug flags are enabled for a cache) immediately followed by a test for
a specific flag. The compiler can probably eliminate the extra check, but we
can make the code nicer by introducing kmem_cache_debug_flags() that works
like kmem_cache_debug() (including the static key check) but tests for
specific flag(s). The next patches will add more users.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 24d3e5f832aa..c8e8b4ae2451 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -122,18 +122,28 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 #endif
 
-static inline int kmem_cache_debug(struct kmem_cache *s)
+/*
+ * Returns true if any of the specified slub_debug flags is enabled for the
+ * cache. Use only for flags parsed by setup_slub_debug() as it also enables
+ * the static key.
+ */
+static inline int kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
 #ifdef CONFIG_SLUB_DEBUG
 	if (static_branch_unlikely(&slub_debug_enabled))
-		return s->flags & SLAB_DEBUG_FLAGS;
+		return s->flags & flags;
 #endif
 	return 0;
 }
 
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
+	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
+}
+
 void *fixup_red_left(struct kmem_cache *s, void *p)
 {
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 		p += s->red_left_pad;
 
 	return p;
@@ -4076,7 +4086,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	offset = (ptr - page_address(page)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
		if (offset < s->red_left_pad)
			usercopy_abort("SLUB object in left red zone",
				       s->name, to_user, offset, n);
-- 
2.26.2
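
For illustration only (not part of this patch): the changelog says the next
patches will add more users, and a hypothetical follow-up conversion of the
"any debug flag enabled, then test one specific flag" pattern could look like
the sketch below. example_caller() and do_expensive_check() are placeholders,
and SLAB_CONSISTENCY_CHECKS is just one possible flag a caller might test.

	/* Hypothetical caller, shown only to illustrate the conversion. */
	static void example_caller(struct kmem_cache *s, void *object)
	{
		/* Before: generic check followed by a specific flag test. */
		if (kmem_cache_debug(s) && (s->flags & SLAB_CONSISTENCY_CHECKS))
			do_expensive_check(s, object);

		/* After: one helper call, still gated by the slub_debug static key. */
		if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
			do_expensive_check(s, object);
	}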