[merged mm-stable] mm-mm_init-rename-__init_reserved_page_zone-to-__init_page_from_nid.patch removed from -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The quilt patch titled
     Subject: mm/mm_init: rename __init_reserved_page_zone to __init_page_from_nid
has been removed from the -mm tree.  Its filename was
     mm-mm_init-rename-__init_reserved_page_zone-to-__init_page_from_nid.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Mike Rapoport (Microsoft)" <rppt@xxxxxxxxxx>
Subject: mm/mm_init: rename __init_reserved_page_zone to __init_page_from_nid
Date: Tue, 25 Feb 2025 10:30:16 +0200

The __init_reserved_page_zone() function finds the zone for pfn and nid and
performs initialization of a struct page with that zone and nid.  There is
nothing in that function about reserved pages, so it is misnamed.

Rename it to __init_page_from_nid() to better reflect what the function
does.

Link: https://lkml.kernel.org/r/20250225083017.567649-2-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Reviewed-by: Wei Yang <richard.weiyang@xxxxxxxxx>
Cc: Frank van der Linden <fvdl@xxxxxxxxxx>
Cc: Muchun Song <muchun.song@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/hugetlb.c  |    2 +-
 mm/internal.h |    2 +-
 mm/mm_init.c  |    4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

--- a/mm/hugetlb.c~mm-mm_init-rename-__init_reserved_page_zone-to-__init_page_from_nid
+++ a/mm/hugetlb.c
@@ -3407,7 +3407,7 @@ static void __init hugetlb_bootmem_free_
 
 	while (npages--) {
 		pfn = page_to_pfn(page);
-		__init_reserved_page_zone(pfn, nid);
+		__init_page_from_nid(pfn, nid);
 		free_reserved_page(page);
 		page++;
 	}
--- a/mm/internal.h~mm-mm_init-rename-__init_reserved_page_zone-to-__init_page_from_nid
+++ a/mm/internal.h
@@ -1518,7 +1518,7 @@ static inline bool pte_needs_soft_dirty_
 
 void __meminit __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid);
-void __meminit __init_reserved_page_zone(unsigned long pfn, int nid);
+void __meminit __init_page_from_nid(unsigned long pfn, int nid);
 
 /* shrinker related functions */
 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
--- a/mm/mm_init.c~mm-mm_init-rename-__init_reserved_page_zone-to-__init_page_from_nid
+++ a/mm/mm_init.c
@@ -668,7 +668,7 @@ static inline void fixup_hashdist(void)
 /*
  * Initialize a reserved page unconditionally, finding its zone first.
  */
-void __meminit __init_reserved_page_zone(unsigned long pfn, int nid)
+void __meminit __init_page_from_nid(unsigned long pfn, int nid)
 {
 	pg_data_t *pgdat;
 	int zid;
@@ -748,7 +748,7 @@ static void __meminit init_reserved_page
 	if (early_page_initialised(pfn, nid))
 		return;
 
-	__init_reserved_page_zone(pfn, nid);
+	__init_page_from_nid(pfn, nid);
 }
 #else
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
_

Patches currently in -mm which might be from rppt@xxxxxxxxxx are






[Index of Archives]     [Kernel Archive]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux