This patch adds a method to charge or uncharge a given vmalloc'ed
address. It is similar to vfree(), except that it does not touch the
underlying pages; it only updates their memcg accounting.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/slab.h    |  1 +
 include/linux/vmalloc.h |  1 +
 mm/util.c               |  9 +++++++++
 mm/vmalloc.c            | 29 +++++++++++++++++++++++++++++
 4 files changed, 40 insertions(+)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index ae82e23..7173354 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -759,6 +759,7 @@ extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
 		      gfp_t flags) __alloc_size(3);
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);
+void kvcharge(const void *addr, bool charge);
 
 unsigned int kmem_cache_size(struct kmem_cache *s);
 void __init kmem_cache_init_late(void);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 880227b..b48d941 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -161,6 +161,7 @@ void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
+void vcharge(const void *addr, bool charge);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
diff --git a/mm/util.c b/mm/util.c
index 7e433690..f5f5e05 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -614,6 +614,15 @@ void kvfree(const void *addr)
 }
 EXPORT_SYMBOL(kvfree);
 
+void kvcharge(const void *addr, bool charge)
+{
+	if (is_vmalloc_addr(addr))
+		vcharge(addr, charge);
+	else
+		kcharge(addr, charge);
+}
+EXPORT_SYMBOL(kvcharge);
+
 /**
  * kvfree_sensitive - Free a data object containing sensitive information.
  * @addr: address of the data object to be freed.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4165304..6fc2295 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2715,6 +2715,35 @@ void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
+void vcharge(const void *addr, bool charge)
+{
+	unsigned int page_order;
+	struct vm_struct *area;
+	int i;
+
+	WARN_ON(!in_task());
+
+	if (!addr)
+		return;
+
+	area = find_vm_area(addr);
+	if (unlikely(!area))
+		return;
+
+	page_order = vm_area_page_order(area);
+	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+		struct page *page = area->pages[i];
+
+		WARN_ON(!page);
+		if (charge)
+			memcg_kmem_charge_page(page, GFP_KERNEL, page_order);
+		else
+			memcg_kmem_uncharge_page(page, page_order);
+		cond_resched();
+	}
+}
+EXPORT_SYMBOL(vcharge);
+
 /**
  * vunmap - release virtual mapping obtained by vmap()
  * @addr: memory base address
--
1.8.3.1
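
For reviewers, a minimal sketch of how a caller might use the new
helper. This example is not part of the patch: the recharge flow and
the example_recharge() function are assumptions about the intended use;
set_active_memcg() is the existing helper for redirecting the charge
target of the current task.

#include <linux/slab.h>
#include <linux/memcontrol.h>
#include <linux/sched/mm.h>

/*
 * Hypothetical caller: move the memcg accounting of an existing
 * kvmalloc'ed object from its original owner to new_memcg, without
 * reallocating or copying the object.
 */
static void example_recharge(void *obj, struct mem_cgroup *new_memcg)
{
	struct mem_cgroup *old;

	/* Drop the charge taken when obj was originally allocated. */
	kvcharge(obj, false);

	/* Re-account the same backing pages against new_memcg. */
	old = set_active_memcg(new_memcg);
	kvcharge(obj, true);
	set_active_memcg(old);
}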