The patch titled
     Subject: memblock: use separate iterators for memory and reserved regions
has been added to the -mm tree.  Its filename is
     memblock-use-separate-iterators-for-memory-and-reserved-regions.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/memblock-use-separate-iterators-for-memory-and-reserved-regions.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/memblock-use-separate-iterators-for-memory-and-reserved-regions.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Subject: memblock: use separate iterators for memory and reserved regions

for_each_memblock() is used to iterate over memblock.memory in a few
places that use data from memblock_region rather than the memory ranges.

Introduce separate for_each_mem_region() and
for_each_reserved_mem_region() to improve encapsulation of memblock
internals from its users.

Link: https://lkml.kernel.org/r/20200818151634.14343-18-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Reviewed-by: Baoquan He <bhe@xxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>			[x86]
Acked-by: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>	[MIPS]
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@xxxxxxxxx>	[.clang-format]
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Daniel Axtens <dja@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Emil Renner Berthing <kernel@xxxxxxxx>
Cc: Hari Bathini <hbathini@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Jonathan Cameron <Jonathan.Cameron@xxxxxxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Max Filippov <jcmvbkbc@xxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Michal Simek <monstr@xxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Stafford Horne <shorne@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 .clang-format                  |    3 ++-
 arch/arm64/kernel/setup.c      |    2 +-
 arch/arm64/mm/numa.c           |    2 +-
 arch/mips/netlogic/xlp/setup.c |    2 +-
 arch/riscv/mm/init.c           |    2 +-
 arch/x86/mm/numa.c             |    2 +-
 include/linux/memblock.h       |   19 ++++++++++++++++---
 mm/memblock.c                  |    4 ++--
 mm/page_alloc.c                |    8 ++++----
 9 files changed, 29 insertions(+), 15 deletions(-)

--- a/arch/arm64/kernel/setup.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_reso
 	if (!standard_resources)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		res = &standard_resources[i++];
 		if (memblock_is_nomap(region)) {
 			res->name = "reserved";
--- a/arch/arm64/mm/numa.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/arm64/mm/numa.c
@@ -350,7 +350,7 @@ static int __init numa_register_nodes(vo
 	struct memblock_region *mblk;
 
 	/* Check that valid nid is set to memblks */
-	for_each_memblock(memory, mblk) {
+	for_each_mem_region(mblk) {
 		int mblk_nid = memblock_get_region_node(mblk);
 
 		if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
--- a/arch/mips/netlogic/xlp/setup.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/mips/netlogic/xlp/setup.c
@@ -70,7 +70,7 @@ static void nlm_fixup_mem(void)
 	const int pref_backup = 512;
 	struct memblock_region *mem;
 
-	for_each_memblock(memory, mem) {
+	for_each_mem_region(mem) {
 		memblock_remove(mem->base + mem->size - pref_backup,
 			pref_backup);
 	}
--- a/arch/riscv/mm/init.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/riscv/mm/init.c
@@ -531,7 +531,7 @@ static void __init resource_init(void)
 {
 	struct memblock_region *region;
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		struct resource *res;
 
 		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
--- a/arch/x86/mm/numa.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/x86/mm/numa.c
@@ -516,7 +516,7 @@ static void __init numa_clear_kernel_nod
 	 *   memory ranges, because quirks such as trim_snb_memory()
 	 *   reserve specific pages for Sandy Bridge graphics. ]
 	 */
-	for_each_memblock(reserved, mb_region) {
+	for_each_reserved_mem_region(mb_region) {
 		int nid = memblock_get_region_node(mb_region);
 
 		if (nid != MAX_NUMNODES)
--- a/.clang-format~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/.clang-format
@@ -201,7 +201,7 @@ ForEachMacros:
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
   - 'for_each_member'
-  - 'for_each_memblock'
+  - 'for_each_mem_region'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
   - 'for_each_mem_pfn_range'
@@ -268,6 +268,7 @@ ForEachMacros:
   - 'for_each_property_of_node'
   - 'for_each_registered_fb'
   - 'for_each_reserved_mem_range'
+  - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dais'
   - 'for_each_rtd_codec_dais_rollback'
   - 'for_each_rtd_components'
--- a/include/linux/memblock.h~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/include/linux/memblock.h
@@ -553,9 +553,22 @@ static inline unsigned long memblock_reg
 	return PFN_UP(reg->base + reg->size);
 }
 
-#define for_each_memblock(memblock_type, region)			\
-	for (region = memblock.memblock_type.regions;			\
-	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region)					\
+	for (region = memblock.memory.regions;				\
+	     region < (memblock.memory.regions + memblock.memory.cnt); \
+	     region++)
+
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region)				\
+	for (region = memblock.reserved.regions;			\
+	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
 	     region++)
 
 extern void *alloc_large_system_hash(const char *tablename,
--- a/mm/memblock.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/mm/memblock.c
@@ -1667,7 +1667,7 @@ static phys_addr_t __init_memblock __fin
 	 * the memory memblock regions, if the @limit exceeds the total size
 	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
 	 */
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		if (limit <= r->size) {
 			max_addr = r->base + limit;
 			break;
@@ -1837,7 +1837,7 @@ void __init_memblock memblock_trim_memor
 	phys_addr_t start, end, orig_start, orig_end;
 	struct memblock_region *r;
 
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		orig_start = r->base;
 		orig_end = r->base + r->size;
 		start = round_up(orig_start, align);
--- a/mm/page_alloc.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/mm/page_alloc.c
@@ -5955,7 +5955,7 @@ overlap_memmap_init(unsigned long zone,
 
 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
-			for_each_memblock(memory, r) {
+			for_each_mem_region(r) {
 				if (*pfn < memblock_region_memory_end_pfn(r))
 					break;
 			}
@@ -6540,7 +6540,7 @@ static unsigned long __init zone_absent_
 		unsigned long start_pfn, end_pfn;
 		struct memblock_region *r;
 
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
 					  zone_start_pfn, zone_end_pfn);
 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
@@ -7134,7 +7134,7 @@ static void __init find_zone_movable_pfn
 	 * options.
 	 */
 	if (movable_node_is_enabled()) {
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (!memblock_is_hotpluggable(r))
 				continue;
 
@@ -7155,7 +7155,7 @@ static void __init find_zone_movable_pfn
 	if (mirrored_kernelcore) {
 		bool mem_below_4gb_not_mirrored = false;
 
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (memblock_is_mirror(r))
 				continue;
 
_

Patches currently in -mm which might be from rppt@xxxxxxxxxxxxx are

kvm-ppc-book3s-hv-simplify-kvm_cma_reserve.patch
dma-contiguous-simplify-cma_early_percent_memory.patch
arm-xtensa-simplify-initialization-of-high-memory-pages.patch
arm64-numa-simplify-dummy_numa_init.patch
h8300-nds32-openrisc-simplify-detection-of-memory-extents.patch
riscv-drop-unneeded-node-initialization.patch
mircoblaze-drop-unneeded-numa-and-sparsemem-initializations.patch
memblock-make-for_each_memblock_type-iterator-private.patch
memblock-make-memblock_debug-and-related-functionality-private.patch
memblock-reduce-number-of-parameters-in-for_each_mem_range.patch
arch-mm-replace-for_each_memblock-with-for_each_mem_pfn_range.patch
arch-drivers-replace-for_each_membock-with-for_each_mem_range.patch
x86-setup-simplify-initrd-relocation-and-reservation.patch
x86-setup-simplify-reserve_crashkernel.patch
memblock-remove-unused-memblock_mem_size.patch
memblock-implement-for_each_reserved_mem_region-using-__next_mem_region.patch
memblock-use-separate-iterators-for-memory-and-reserved-regions.patch
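
For reference, a minimal usage sketch of the two iterators introduced by the
patch above, mirroring the definitions added to include/linux/memblock.h.
This is an illustration only, not part of the patch; the function name
dump_memblock_regions() is hypothetical.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>

/* Illustrative only: print every memory and reserved memblock region. */
static void __init dump_memblock_regions(void)
{
	struct memblock_region *region;

	/* formerly: for_each_memblock(memory, region) */
	for_each_mem_region(region)
		pr_info("memory:   base=%pa size=%pa\n",
			&region->base, &region->size);

	/* formerly: for_each_memblock(reserved, region) */
	for_each_reserved_mem_region(region)
		pr_info("reserved: base=%pa size=%pa\n",
			&region->base, &region->size);
}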