The patch titled
     Subject: mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
has been removed from the -mm tree.  Its filename was
     mm-sparsemem-convert-kmalloc_section_memmap-to-populate_section_memmap.patch

This patch was dropped because it was withdrawn

------------------------------------------------------
From: Dan Williams <dan.j.williams@xxxxxxxxx>
Subject: mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()

Allow sub-section sized ranges to be added to the memmap.
populate_section_memmap() takes an explicit pfn range rather than assuming
a full section, and those parameters are plumbed all the way through to
vmemmap_populate().  There should be no sub-section usage in current
deployments.  New warnings are added to clarify which memmap allocation
paths are sub-section capable.

Link: http://lkml.kernel.org/r/155552636181.2015392.6062894291885124658.stgit@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Cc: Jeff Moyer <jmoyer@xxxxxxxxxx>
Cc: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: Toshi Kani <toshi.kani@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/x86/mm/init_64.c |    4 +-
 include/linux/mm.h    |    4 +-
 mm/sparse-vmemmap.c   |   21 +++++++++----
 mm/sparse.c           |   61 ++++++++++++++++++++++++----------
 4 files changed, 57 insertions(+), 33 deletions(-)
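For reference while reading the diffs below, the shape of the conversion:
sparse_mem_map_populate() took a bare section number and implicitly meant
PAGES_PER_SECTION pages, while __populate_section_memmap() takes an
explicit (pfn, nr_pages) range.  A minimal standalone C sketch of the
arithmetic a full-section caller performs, assuming the x86_64 defaults
(SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12); the macros and helper below are
local stand-ins for the kernel's, not code from this patch:

#include <stdio.h>

/* illustrative values only: x86_64 defaults, not taken from this patch */
#define SECTION_SIZE_BITS	27
#define PAGE_SHIFT		12
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)

/* mirrors the kernel's section_nr_to_pfn() helper */
static unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pnum = 3;	/* old interface: a bare section number */

	/* new interface: an explicit pfn range for the same section */
	unsigned long pfn = section_nr_to_pfn(pnum);
	unsigned long nr_pages = PAGES_PER_SECTION;

	printf("section %lu -> pfn [%lu, %lu)\n", pnum, pfn, pfn + nr_pages);
	return 0;
}

With these values a section spans 32768 pages (128MB), so section 3 maps
to the pfn range [98304, 131072).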
--- a/arch/x86/mm/init_64.c~mm-sparsemem-convert-kmalloc_section_memmap-to-populate_section_memmap
+++ a/arch/x86/mm/init_64.c
@@ -1457,7 +1457,9 @@ int __meminit vmemmap_populate(unsigned
 {
 	int err;
 
-	if (boot_cpu_has(X86_FEATURE_PSE))
+	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
+		err = vmemmap_populate_basepages(start, end, node);
+	else if (boot_cpu_has(X86_FEATURE_PSE))
 		err = vmemmap_populate_hugepages(start, end, node, altmap);
 	else if (altmap) {
 		pr_err_once("%s: no cpu support for altmap allocations\n",
--- a/include/linux/mm.h~mm-sparsemem-convert-kmalloc_section_memmap-to-populate_section_memmap
+++ a/include/linux/mm.h
@@ -2757,8 +2757,8 @@ const char * arch_vma_name(struct vm_are
 void print_vma_addr(char *prefix, unsigned long rip);
 
 void *sparse_buffer_alloc(unsigned long size);
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap);
+struct page * __populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
--- a/mm/sparse.c~mm-sparsemem-convert-kmalloc_section_memmap-to-populate_section_memmap
+++ a/mm/sparse.c
@@ -452,8 +452,8 @@ static unsigned long __init section_map_
 	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
 
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+struct page __init *__populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
@@ -534,10 +534,13 @@ static void __init sparse_init_nid(int n
 	}
 	sparse_buffer_init(map_count * section_map_size(), nid);
 	for_each_present_section_nr(pnum_begin, pnum) {
+		unsigned long pfn = section_nr_to_pfn(pnum);
+
 		if (pnum >= pnum_end)
 			break;
 
-		map = sparse_mem_map_populate(pnum, nid, NULL);
+		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+				nid, NULL);
 		if (!map) {
 			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
 			       __func__, nid);
@@ -637,17 +640,17 @@ void offline_mem_sections(unsigned long
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+static struct page *populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-	/* This will make the necessary allocations eventually. */
-	return sparse_mem_map_populate(pnum, nid, altmap);
+	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap,
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 		struct vmem_altmap *altmap)
 {
-	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+	unsigned long start = (unsigned long) pfn_to_page(pfn);
+	unsigned long end = start + nr_pages * sizeof(struct page);
 
 	vmemmap_free(start, end, altmap);
 }
@@ -661,11 +664,18 @@ static void free_map_bootmem(struct page
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(void)
+struct page *populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	struct page *page, *ret;
 	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
+	if ((pfn & ~PAGE_SECTION_MASK) || nr_pages != PAGES_PER_SECTION) {
+		WARN(1, "%s: called with section unaligned parameters\n",
+			__func__);
+		return NULL;
+	}
+
 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
 	if (page)
 		goto got_map_page;
@@ -682,15 +692,17 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 		struct vmem_altmap *altmap)
 {
-	return __kmalloc_section_memmap();
-}
+	struct page *memmap = pfn_to_page(pfn);
+
+	if ((pfn & ~PAGE_SECTION_MASK) || nr_pages != PAGES_PER_SECTION) {
+		WARN(1, "%s: called with section unaligned parameters\n",
+			__func__);
+		return;
+	}
 
-static void __kfree_section_memmap(struct page *memmap,
-		struct vmem_altmap *altmap)
-{
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
@@ -761,12 +773,13 @@ int __meminit sparse_add_one_section(int
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
 	ret = 0;
-	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
+	memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid,
+			altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
 	if (!usage) {
-		__kfree_section_memmap(memmap, altmap);
+		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
 		return -ENOMEM;
 	}
 
@@ -788,7 +801,7 @@ int __meminit sparse_add_one_section(int
 out:
 	if (ret < 0) {
 		kfree(usage);
-		__kfree_section_memmap(memmap, altmap);
+		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
 	}
 	return ret;
 }
@@ -825,7 +838,8 @@ static inline void clear_hwpoisoned_page
 #endif
 
 static void free_section_usage(struct page *memmap,
-		struct mem_section_usage *usage, struct vmem_altmap *altmap)
+		struct mem_section_usage *usage, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	struct page *usage_page;
 
@@ -839,7 +853,7 @@ static void free_section_usage(struct pa
 	if (PageSlab(usage_page) || PageCompound(usage_page)) {
 		kfree(usage);
 		if (memmap)
-			__kfree_section_memmap(memmap, altmap);
+			depopulate_section_memmap(pfn, nr_pages, altmap);
 		return;
 	}
 
@@ -868,7 +882,8 @@ void sparse_remove_one_section(struct zo
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
 
-	free_section_usage(memmap, usage, altmap);
+	free_section_usage(memmap, usage, section_nr_to_pfn(__section_nr(ms)),
+			PAGES_PER_SECTION, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
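The mm/sparse-vmemmap.c hunk below rounds every request out to
SECTION_ACTIVE_SIZE granularity, since populated ranges are tracked in the
section's 'map_active' bitmap.  A minimal standalone sketch of that
rounding, assuming the 2MB sub-section size (512 pfns at a 4K page size)
that the companion mm-sparsemem-prepare-for-sub-section-ranges.patch
introduces; SUBSECTION_PFNS and ALIGN_UP are local stand-ins for
PHYS_PFN(SECTION_ACTIVE_SIZE) and the kernel's ALIGN():

#include <stdio.h>

/* assumed: 2MB sub-sections / 4K pages -> 512 pfns per sub-section */
#define SUBSECTION_PFNS		512UL
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* a deliberately sub-section-unaligned request */
	unsigned long pfn = 1000, nr_pages = 100;

	/* same steps as __populate_section_memmap(): round the end up,
	 * the start down, then recompute the page count */
	unsigned long end = ALIGN_UP(pfn + nr_pages, SUBSECTION_PFNS);

	pfn &= ~(SUBSECTION_PFNS - 1);
	nr_pages = end - pfn;

	printf("populate pfn [%lu, %lu), %lu pages\n", pfn, end, nr_pages);
	/* prints: populate pfn [512, 1536), 1024 pages */
	return 0;
}

So a 100-page request starting at pfn 1000 is widened to the 1024-page
range [512, 1536) that covers it.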
--- a/mm/sparse-vmemmap.c~mm-sparsemem-convert-kmalloc_section_memmap-to-populate_section_memmap
+++ a/mm/sparse-vmemmap.c
@@ -245,19 +245,26 @@ int __meminit vmemmap_populate_basepages
 	return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+struct page * __meminit __populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	unsigned long start;
 	unsigned long end;
-	struct page *map;
 
-	map = pfn_to_page(pnum * PAGES_PER_SECTION);
-	start = (unsigned long)map;
-	end = (unsigned long)(map + PAGES_PER_SECTION);
+	/*
+	 * The minimum granularity of memmap extensions is
+	 * SECTION_ACTIVE_SIZE as allocations are tracked in the
+	 * 'map_active' bitmap of the section.
+	 */
+	end = ALIGN(pfn + nr_pages, PHYS_PFN(SECTION_ACTIVE_SIZE));
+	pfn &= PHYS_PFN(SECTION_ACTIVE_MASK);
+	nr_pages = end - pfn;
+
+	start = (unsigned long) pfn_to_page(pfn);
+	end = start + nr_pages * sizeof(struct page);
 
 	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
-	return map;
+	return pfn_to_page(pfn);
 }
_

Patches currently in -mm which might be from dan.j.williams@xxxxxxxxx are

mm-hotplug-add-mem-hotplug-restrictions-for-remove_memory.patch
mm-kill-is_dev_zone-helper.patch
mm-sparsemem-prepare-for-sub-section-ranges.patch
mm-sparsemem-support-sub-section-hotplug.patch
mm-devm_memremap_pages-enable-sub-section-remap.patch
libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch
libnvdimm-pfn-stop-padding-pmem-namespaces-to-section-alignment.patch
mm-shuffle-initial-free-memory-to-improve-memory-side-cache-utilization.patch
mm-shuffle-initial-free-memory-to-improve-memory-side-cache-utilization-fix.patch
mm-move-buddy-list-manipulations-into-helpers.patch
mm-move-buddy-list-manipulations-into-helpers-fix.patch
mm-maintain-randomization-of-page-free-lists.patch