Works exactly like __get_free_pages except that it also tries to charge
newly allocated pages to kmemcg. It will be used by the next patch.

Signed-off-by: Vladimir Davydov <vdavydov@xxxxxxxxxxxxx>
---
 include/linux/gfp.h |  1 +
 mm/page_alloc.c     | 12 ++++++++++++
 2 files changed, 13 insertions(+)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b46147c45966..34dc0db54b59 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -362,6 +362,7 @@ extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
 					  unsigned int order);
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+extern unsigned long __get_free_kmem_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 88d85367c81e..e4a3a7aa8e42 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3296,6 +3296,18 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 }
 EXPORT_SYMBOL(__get_free_pages);
 
+unsigned long __get_free_kmem_pages(gfp_t gfp_mask, unsigned int order)
+{
+	struct page *page;
+
+	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
+	page = alloc_kmem_pages(gfp_mask, order);
+	if (!page)
+		return 0;
+	return (unsigned long) page_address(page);
+}
+
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
-- 
2.1.4
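
For context, a minimal sketch (not part of this series) of how a caller
might pair the new helper with the existing free_kmem_pages() counterpart,
the same way __get_free_pages() pairs with free_pages(). The example_*
function names are hypothetical and only illustrate the intended usage:

static void *example_alloc_accounted_buf(unsigned int order)
{
	unsigned long addr;

	/*
	 * alloc_kmem_pages() performs the memcg charge internally, so
	 * the caller only has to avoid __GFP_HIGHMEM (enforced by the
	 * VM_BUG_ON in the patch above).
	 */
	addr = __get_free_kmem_pages(GFP_KERNEL, order);
	if (!addr)
		return NULL;
	return (void *)addr;
}

static void example_free_accounted_buf(void *buf, unsigned int order)
{
	/* free_kmem_pages() uncharges the pages before freeing them */
	free_kmem_pages((unsigned long)buf, order);
}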