kmalloc cannot allocate memory from HIGHMEM. A large allocation request
currently bypasses the GFP_SLAB_BUG_MASK check and simply leaks the
memory when page_address() returns NULL. To fix this, factor the
GFP_SLAB_BUG_MASK check out of slab & slub and call it from
kmalloc_order() as well. To keep the code clear, the warning message is
emitted from a single place.

Signed-off-by: Long Li <lonuxli.64@xxxxxxxxx>
---
Changes in v4:
 - Rename the check function to kmalloc_check_flags()
 - Move the flags check into kmalloc_check_flags()

Changes in v3:
 - Put the warning message in one place
 - Update the changelog to be clearer

 mm/slab.c        |  8 +-------
 mm/slab.h        |  1 +
 mm/slab_common.c | 18 +++++++++++++++++-
 mm/slub.c        |  8 +-------
 4 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index ac7a223d9ac3..755f33f96f04 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2573,13 +2573,7 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-		flags &= ~GFP_SLAB_BUG_MASK;
-		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-				invalid_mask, &invalid_mask, flags, &flags);
-		dump_stack();
-	}
+	flags = kmalloc_check_flags(flags);
 	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
diff --git a/mm/slab.h b/mm/slab.h
index a06f3313e4a0..48df5660764c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -90,6 +90,7 @@ void create_kmalloc_caches(slab_flags_t);
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif
 
+gfp_t kmalloc_check_flags(gfp_t flags);
 
 /* Functions provided by the slab allocators */
 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a143a8c8f874..9184e4575d6d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,6 +26,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
 
+#include "internal.h"
+
 #include "slab.h"
 
 enum slab_state slab_state;
@@ -805,6 +807,20 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 }
 #endif /* !CONFIG_SLOB */
 
+gfp_t kmalloc_check_flags(gfp_t flags)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
+
+		flags &= ~GFP_SLAB_BUG_MASK;
+		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+			invalid_mask, &invalid_mask, flags, &flags);
+		dump_stack();
+	}
+
+	return flags;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -815,7 +831,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	void *ret = NULL;
 	struct page *page;
 
-	flags |= __GFP_COMP;
+	flags = kmalloc_check_flags(flags) | __GFP_COMP;
 	page = alloc_pages(flags, order);
 	if (likely(page)) {
 		ret = page_address(page);
diff --git a/mm/slub.c b/mm/slub.c
index 62d2de56549e..8e787767850f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1817,13 +1817,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-		flags &= ~GFP_SLAB_BUG_MASK;
-		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-			invalid_mask, &invalid_mask, flags, &flags);
-		dump_stack();
-	}
+	flags = kmalloc_check_flags(flags);
 
 	return allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-- 
2.17.1
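
P.S. For reviewers, below is a simplified stand-alone C model of the
pre-patch kmalloc_order() leak described in the changelog. The fake_*
and model_* names are illustrative stand-ins, not kernel APIs; the
sketch only shows why a bogus __GFP_HIGHMEM request that reaches the
large-allocation path leaks pages once page_address() returns NULL.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stub, not the real struct page. */
struct page { void *vaddr; };

/* Models alloc_pages(): a HIGHMEM page has no direct kernel mapping. */
static struct page *fake_alloc_pages(int highmem)
{
	struct page *page = malloc(sizeof(*page));

	if (!page)
		return NULL;
	page->vaddr = highmem ? NULL : malloc(4096);
	return page;
}

/* Models page_address(): returns NULL for an unmapped HIGHMEM page. */
static void *fake_page_address(struct page *page)
{
	return page->vaddr;
}

/* Models the pre-patch kmalloc_order() logic: flags are not sanitized,
 * so a __GFP_HIGHMEM request gets this far. */
static void *model_kmalloc_order(int highmem)
{
	struct page *page = fake_alloc_pages(highmem);
	void *ret = NULL;

	if (page)
		ret = fake_page_address(page);
	/* Bug modeled here: when ret is NULL the page is never freed,
	 * so the allocation is leaked. */
	return ret;
}

int main(void)
{
	if (!model_kmalloc_order(1))
		printf("NULL returned, page leaked\n");
	return 0;
}

With this patch, kmalloc_check_flags() strips __GFP_HIGHMEM (part of
GFP_SLAB_BUG_MASK) before alloc_pages() is called, so the leak path
modeled above is no longer reachable.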