[patch 178/181] memblock: use separate iterators for memory and reserved regions


From: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Subject: memblock: use separate iterators for memory and reserved regions

for_each_memblock() is used to iterate over memblock.memory in a few
places that use data from memblock_region rather than the memory ranges.

Introduce separate for_each_mem_region() and
for_each_reserved_mem_region() to improve encapsulation of memblock
internals from its users.
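
As an illustration of how the new memory iterator reads (a minimal sketch,
not taken from this patch; the pr_info() loop body is purely an example,
but the macro and the struct memblock_region fields match the definitions
added to include/linux/memblock.h below):

	/* Walk every region of memblock.memory and print its physical range. */
	struct memblock_region *region;

	for_each_mem_region(region) {
		phys_addr_t start = region->base;
		phys_addr_t end = region->base + region->size;

		pr_info("memory region: [%pa-%pa]\n", &start, &end);
	}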

Link: https://lkml.kernel.org/r/20200818151634.14343-18-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Reviewed-by: Baoquan He <bhe@xxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>			[x86]
Acked-by: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>	[MIPS]
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@xxxxxxxxx>	[.clang-format]
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Daniel Axtens <dja@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Emil Renner Berthing <kernel@xxxxxxxx>
Cc: Hari Bathini <hbathini@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Jonathan Cameron <Jonathan.Cameron@xxxxxxxxxx>
Cc: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Cc: Max Filippov <jcmvbkbc@xxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Michal Simek <monstr@xxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Stafford Horne <shorne@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 .clang-format                  |    3 ++-
 arch/arm64/kernel/setup.c      |    2 +-
 arch/arm64/mm/numa.c           |    2 +-
 arch/mips/netlogic/xlp/setup.c |    2 +-
 arch/riscv/mm/init.c           |    2 +-
 arch/x86/mm/numa.c             |    2 +-
 include/linux/memblock.h       |   19 ++++++++++++++++---
 mm/memblock.c                  |    4 ++--
 mm/page_alloc.c                |    8 ++++----
 9 files changed, 29 insertions(+), 15 deletions(-)

--- a/arch/arm64/kernel/setup.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_reso
 	if (!standard_resources)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		res = &standard_resources[i++];
 		if (memblock_is_nomap(region)) {
 			res->name  = "reserved";
--- a/arch/arm64/mm/numa.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/arm64/mm/numa.c
@@ -354,7 +354,7 @@ static int __init numa_register_nodes(vo
 	struct memblock_region *mblk;
 
 	/* Check that valid nid is set to memblks */
-	for_each_memblock(memory, mblk) {
+	for_each_mem_region(mblk) {
 		int mblk_nid = memblock_get_region_node(mblk);
 
 		if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
--- a/arch/mips/netlogic/xlp/setup.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/mips/netlogic/xlp/setup.c
@@ -70,7 +70,7 @@ static void nlm_fixup_mem(void)
 	const int pref_backup = 512;
 	struct memblock_region *mem;
 
-	for_each_memblock(memory, mem) {
+	for_each_mem_region(mem) {
 		memblock_remove(mem->base + mem->size - pref_backup,
 			pref_backup);
 	}
--- a/arch/riscv/mm/init.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/riscv/mm/init.c
@@ -531,7 +531,7 @@ static void __init resource_init(void)
 {
 	struct memblock_region *region;
 
-	for_each_memblock(memory, region) {
+	for_each_mem_region(region) {
 		struct resource *res;
 
 		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
--- a/arch/x86/mm/numa.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/arch/x86/mm/numa.c
@@ -514,7 +514,7 @@ static void __init numa_clear_kernel_nod
 	 *   memory ranges, because quirks such as trim_snb_memory()
 	 *   reserve specific pages for Sandy Bridge graphics. ]
 	 */
-	for_each_memblock(reserved, mb_region) {
+	for_each_reserved_mem_region(mb_region) {
 		int nid = memblock_get_region_node(mb_region);
 
 		if (nid != MAX_NUMNODES)
--- a/.clang-format~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/.clang-format
@@ -203,7 +203,7 @@ ForEachMacros:
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
   - 'for_each_member'
-  - 'for_each_memblock'
+  - 'for_each_mem_region'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
   - 'for_each_mem_pfn_range'
@@ -274,6 +274,7 @@ ForEachMacros:
   - 'for_each_requested_gpio'
   - 'for_each_requested_gpio_in_range'
   - 'for_each_reserved_mem_range'
+  - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dais'
   - 'for_each_rtd_codec_dais_rollback'
   - 'for_each_rtd_components'
--- a/include/linux/memblock.h~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/include/linux/memblock.h
@@ -553,9 +553,22 @@ static inline unsigned long memblock_reg
 	return PFN_UP(reg->base + reg->size);
 }
 
-#define for_each_memblock(memblock_type, region)					\
-	for (region = memblock.memblock_type.regions;					\
-	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region)					\
+	for (region = memblock.memory.regions;				\
+	     region < (memblock.memory.regions + memblock.memory.cnt);	\
+	     region++)
+
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region)				\
+	for (region = memblock.reserved.regions;			\
+	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
 	     region++)
 
 extern void *alloc_large_system_hash(const char *tablename,
--- a/mm/memblock.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/mm/memblock.c
@@ -1667,7 +1667,7 @@ static phys_addr_t __init_memblock __fin
 	 * the memory memblock regions, if the @limit exceeds the total size
 	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
 	 */
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		if (limit <= r->size) {
 			max_addr = r->base + limit;
 			break;
@@ -1837,7 +1837,7 @@ void __init_memblock memblock_trim_memor
 	phys_addr_t start, end, orig_start, orig_end;
 	struct memblock_region *r;
 
-	for_each_memblock(memory, r) {
+	for_each_mem_region(r) {
 		orig_start = r->base;
 		orig_end = r->base + r->size;
 		start = round_up(orig_start, align);
--- a/mm/page_alloc.c~memblock-use-separate-iterators-for-memory-and-reserved-regions
+++ a/mm/page_alloc.c
@@ -5961,7 +5961,7 @@ overlap_memmap_init(unsigned long zone,
 
 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
-			for_each_memblock(memory, r) {
+			for_each_mem_region(r) {
 				if (*pfn < memblock_region_memory_end_pfn(r))
 					break;
 			}
@@ -6546,7 +6546,7 @@ static unsigned long __init zone_absent_
 		unsigned long start_pfn, end_pfn;
 		struct memblock_region *r;
 
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
 					  zone_start_pfn, zone_end_pfn);
 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
@@ -7140,7 +7140,7 @@ static void __init find_zone_movable_pfn
 	 * options.
 	 */
 	if (movable_node_is_enabled()) {
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (!memblock_is_hotpluggable(r))
 				continue;
 
@@ -7161,7 +7161,7 @@ static void __init find_zone_movable_pfn
 	if (mirrored_kernelcore) {
 		bool mem_below_4gb_not_mirrored = false;
 
-		for_each_memblock(memory, r) {
+		for_each_mem_region(r) {
 			if (memblock_is_mirror(r))
 				continue;
 
_
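
For completeness, the reserved-region counterpart follows the same pattern
as the arch/x86/mm/numa.c hunk above; a minimal sketch, with the pr_info()
body again only illustrative:

	/*
	 * Walk memblock.reserved and report the node id recorded for each
	 * reserved region, using the same lookup the x86 NUMA hunk performs.
	 */
	struct memblock_region *r;

	for_each_reserved_mem_region(r) {
		int nid = memblock_get_region_node(r);

		pr_info("reserved region (node %d): base %pa, size %pa\n",
			nid, &r->base, &r->size);
	}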