From: Michal Hocko <mhocko@xxxxxxxx>

The "kmalloc" prefix is misleading. Rename kmalloc_section_memmap/__kfree_section_memmap to alloc_section_memmap/free_section_memmap, which better reflects the functionality.

Signed-off-by: Michal Hocko <mhocko@xxxxxxxx>
Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
 mm/sparse.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index dd30468dc8f5..27428b965d46 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -676,13 +676,13 @@ static void free_deferred_vmemmap_range(unsigned long start,
 	in_vmemmap_range = false;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap,
+static void free_section_memmap(struct page *memmap,
 		struct vmem_altmap *altmap)
 {
 	unsigned long start = (unsigned long)memmap;
@@ -732,13 +732,13 @@ static struct page *__kmalloc_section_memmap(void)
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap)
 {
 	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap,
+static void free_section_memmap(struct page *memmap,
 		struct vmem_altmap *altmap)
 {
 	if (is_vmalloc_addr(memmap))
@@ -803,12 +803,12 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
 	ret = 0;
-	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
+	memmap = alloc_section_memmap(section_nr, nid, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap, altmap);
+		free_section_memmap(memmap, altmap);
 		return -ENOMEM;
 	}
 
@@ -830,7 +830,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 out:
 	if (ret < 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap, altmap);
+		free_section_memmap(memmap, altmap);
 	}
 	return ret;
 }
@@ -881,7 +881,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap,
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap, altmap);
+			free_section_memmap(memmap, altmap);
 		return;
 	}
 
-- 
2.13.7