Since commit 60cd4bcd6238 ("memcg: localize memcg_kmem_enabled() check"),
we have an API with which callers no longer need to explicitly check
memcg_kmem_enabled().

Signed-off-by: Hui Su <sh_def@xxxxxxx>
---
 mm/page_alloc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa227a479e4..dc990a899ded 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1214,8 +1214,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		 * Do not let hwpoison pages hit pcplists/buddy
 		 * Untie memcg state and reset page's owner
 		 */
-		if (memcg_kmem_enabled() && PageKmemcg(page))
-			__memcg_kmem_uncharge_page(page, order);
+		if (PageKmemcg(page))
+			memcg_kmem_uncharge_page(page, order);
 		reset_page_owner(page, order);
 		return false;
 	}
@@ -1244,8 +1244,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
-	if (memcg_kmem_enabled() && PageKmemcg(page))
-		__memcg_kmem_uncharge_page(page, order);
+	if (PageKmemcg(page))
+		memcg_kmem_uncharge_page(page, order);
 	if (check_free)
 		bad += check_free_page(page);
 	if (bad)
@@ -4965,8 +4965,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
+	if ((gfp_mask & __GFP_ACCOUNT) && page &&
+	    unlikely(memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
-- 
2.29.2