The patch titled
     Subject: memblock: introduce memmap_init_kho_scratch()
has been added to the -mm mm-nonmm-unstable branch.  Its filename is
     memblock-introduce-memmap_init_kho_scratch.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/memblock-introduce-memmap_init_kho_scratch.patch

This patch will later appear in the mm-nonmm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Mike Rapoport (Microsoft)" <rppt@xxxxxxxxxx>
Subject: memblock: introduce memmap_init_kho_scratch()
Date: Thu, 6 Feb 2025 15:27:44 +0200

With deferred initialization of struct page it will be necessary to
initialize the memory map for KHO scratch regions early.

Add a memmap_init_kho_scratch() method that will allow such initialization
in upcoming patches.

Link: https://lkml.kernel.org/r/20250206132754.2596694-5-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Cc: Alexander Graf <graf@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Anthony Yznaga <anthony.yznaga@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Ashish Kalra <ashish.kalra@xxxxxxx>
Cc: Ben Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>
Cc: Eric Biederman <ebiederm@xxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: James Gowans <jgowans@xxxxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Krzysztof Kozlowski <krzk@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Pratyush Yadav <ptyadav@xxxxxxxxx>
Cc: Rob Herring <robh+dt@xxxxxxxxxx>
Cc: Rob Herring <robh@xxxxxxxxxx>
Cc: Saravana Kannan <saravanak@xxxxxxxxxx>
Cc: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx>
Cc: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: Usama Arif <usama.arif@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memblock.h |    2 ++
 mm/internal.h            |    2 ++
 mm/memblock.c            |   22 ++++++++++++++++++++++
 mm/mm_init.c             |   11 ++++++++---
 4 files changed, 34 insertions(+), 3 deletions(-)

--- a/include/linux/memblock.h~memblock-introduce-memmap_init_kho_scratch
+++ a/include/linux/memblock.h
@@ -633,9 +633,11 @@ static inline void memtest_report_meminf
 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
 void memblock_set_kho_scratch_only(void);
 void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
 #else
 static inline void memblock_set_kho_scratch_only(void) { }
 static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
 #endif

 #endif /* _LINUX_MEMBLOCK_H */
--- a/mm/internal.h~memblock-introduce-memmap_init_kho_scratch
+++ a/mm/internal.h
@@ -1069,6 +1069,8 @@ DECLARE_STATIC_KEY_TRUE(deferred_pages);
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

+void init_deferred_page(unsigned long pfn, int nid);
+
 enum mminit_level {
         MMINIT_WARNING,
         MMINIT_VERIFY,
--- a/mm/memblock.c~memblock-introduce-memmap_init_kho_scratch
+++ a/mm/memblock.c
@@ -945,6 +945,28 @@ __init_memblock void memblock_clear_kho_
 {
         kho_scratch_only = false;
 }
+
+void __init_memblock memmap_init_kho_scratch_pages(void)
+{
+        phys_addr_t start, end;
+        unsigned long pfn;
+        int nid;
+        u64 i;
+
+        if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
+                return;
+
+        /*
+         * Initialize struct pages for free scratch memory.
+         * The struct pages for reserved scratch memory will be set up in
+         * reserve_bootmem_region()
+         */
+        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                             MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
+                for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
+                        init_deferred_page(pfn, nid);
+        }
+}
 #endif

 /**
--- a/mm/mm_init.c~memblock-introduce-memmap_init_kho_scratch
+++ a/mm/mm_init.c
@@ -728,7 +728,7 @@ defer_init(int nid, unsigned long pfn, u
         return false;
 }

-static void __meminit init_deferred_page(unsigned long pfn, int nid)
+static void __meminit __init_deferred_page(unsigned long pfn, int nid)
 {
         if (early_page_initialised(pfn, nid))
                 return;
@@ -748,11 +748,16 @@ static inline bool defer_init(int nid, u
         return false;
 }

-static inline void init_deferred_page(unsigned long pfn, int nid)
+static inline void __init_deferred_page(unsigned long pfn, int nid)
 {
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

+void __meminit init_deferred_page(unsigned long pfn, int nid)
+{
+        __init_deferred_page(pfn, nid);
+}
+
 /*
  * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
@@ -769,7 +774,7 @@ void __meminit reserve_bootmem_region(ph
         if (pfn_valid(start_pfn)) {
                 struct page *page = pfn_to_page(start_pfn);

-                init_deferred_page(start_pfn, nid);
+                __init_deferred_page(start_pfn, nid);

                 /*
                  * no need for atomic set_bit because the struct
_

Patches currently in -mm which might be from rppt@xxxxxxxxxx are

mm-mm_init-rename-init_reserved_page-to-init_deferred_page.patch
memblock-add-memblock_rsrv_kern-flag.patch
memblock-introduce-memmap_init_kho_scratch.patch
x86-setup-use-memblock_reserve_kern-for-memory-used-by-kernel.patch
documentation-kho-add-memblock-bindings.patch
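
[Editor's note: the helper added by this patch is not yet wired up anywhere;
its call site only arrives in later patches of the series.  The sketch below
illustrates how an early KHO setup path might use it.  The function name
kho_memory_init() and its placement are assumptions for illustration only,
not part of this patch.]

/*
 * Hypothetical caller, for illustration only.  Assumes KHO scratch
 * regions were already registered as MEMBLOCK_KHO_SCRATCH earlier
 * in boot.
 */
#include <linux/init.h>
#include <linux/memblock.h>

static void __init kho_memory_init(void)
{
        /*
         * With CONFIG_DEFERRED_STRUCT_PAGE_INIT the struct pages for
         * free scratch memory would otherwise be initialized much
         * later, so initialize them now, before the scratch regions
         * are handed out for early allocations.
         */
        memmap_init_kho_scratch_pages();
}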