On Thu, 3 Apr 2014, Vladimir Davydov wrote: > --- a/include/linux/slab.h > +++ b/include/linux/slab.h > @@ -358,16 +358,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, > #include <linux/slub_def.h> > #endif > > -static __always_inline void * > -kmalloc_order(size_t size, gfp_t flags, unsigned int order) > -{ > - void *ret; > - > - flags |= (__GFP_COMP | __GFP_KMEMCG); > - ret = (void *) __get_free_pages(flags, order); > - kmemleak_alloc(ret, size, 1, flags); > - return ret; > -} > +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); Hmmm... This was intentionally inlined to allow inline expansion for calls to kmalloc with large constants. The inline expansion directly converts these calls to page allocator calls, avoiding slab overhead. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>