From: Roman Gushchin <guro@xxxxxx>
Subject: mm: slab: rename (un)charge_slab_page() to (un)account_slab_page()

charge_slab_page() and uncharge_slab_page() are no longer related to
memcg charging and uncharging.  To make their names less confusing,
let's rename them to account_slab_page() and unaccount_slab_page()
respectively.

Link: http://lkml.kernel.org/r/20200707173612.124425-2-guro@xxxxxx
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Reviewed-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slab.c |    4 ++--
 mm/slab.h |    8 ++++----
 mm/slub.c |    4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

--- a/mm/slab.c~mm-slab-rename-uncharge_slab_page-to-unaccount_slab_page
+++ a/mm/slab.c
@@ -1379,7 +1379,7 @@ static struct page *kmem_getpages(struct
 		return NULL;
 	}
 
-	charge_slab_page(page, cachep->gfporder, cachep);
+	account_slab_page(page, cachep->gfporder, cachep);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1403,7 +1403,7 @@ static void kmem_freepages(struct kmem_c
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	uncharge_slab_page(page, order, cachep);
+	unaccount_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
--- a/mm/slab.h~mm-slab-rename-uncharge_slab_page-to-unaccount_slab_page
+++ a/mm/slab.h
@@ -423,15 +423,15 @@ static inline struct kmem_cache *virt_to
 	return page->slab_cache;
 }
 
-static __always_inline void charge_slab_page(struct page *page, int order,
-					     struct kmem_cache *s)
+static __always_inline void account_slab_page(struct page *page, int order,
+					      struct kmem_cache *s)
 {
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }
 
-static __always_inline void uncharge_slab_page(struct page *page, int order,
-					       struct kmem_cache *s)
+static __always_inline void unaccount_slab_page(struct page *page, int order,
+						struct kmem_cache *s)
 {
 	if (memcg_kmem_enabled())
 		memcg_free_page_obj_cgroups(page);
--- a/mm/slub.c~mm-slab-rename-uncharge_slab_page-to-unaccount_slab_page
+++ a/mm/slub.c
@@ -1621,7 +1621,7 @@ static inline struct page *alloc_slab_pa
 		page = __alloc_pages_node(node, flags, order);
 
 	if (page)
-		charge_slab_page(page, order, s);
+		account_slab_page(page, order, s);
 
 	return page;
 }
@@ -1844,7 +1844,7 @@ static void __free_slab(struct kmem_cach
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	uncharge_slab_page(page, order, s);
+	unaccount_slab_page(page, order, s);
 	__free_pages(page, order);
 }
_