On 11.06.24 05:08, Wei Yang wrote:
Current call flow looks like this:
start_kernel
mm_core_init
mem_init
mem_init_print_info
rest_init
kernel_init
kernel_init_freeable
page_alloc_init_late
deferred_init_memmap
With CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled, at the time
mem_init_print_info() is called, not all pages have been initialized and
freed to the buddy allocator yet.
This has one issue:
* nr_free_pages() only accounts for the pages freed so far,
which is not what we expect.
Maybe mention that this will also help changing totalpage accounting.
Let's print the mem info after deferred struct page init is done.
Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
CC: David Hildenbrand <david@xxxxxxxxxx>
---
mm/mm_init.c | 104 +++++++++++++++++++++++++--------------------------
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index f72b852bd5b8..34a6de20ef77 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2322,6 +2322,57 @@ void set_zone_contiguous(struct zone *zone)
zone->contiguous = true;
}
+static void __init mem_init_print_info(void)
Can you avoid all that churn simply by doing a
static void __init mem_init_print_info(void);
somewhere at the beginning of this file?
+{
+ unsigned long physpages, codesize, datasize, rosize, bss_size;
+ unsigned long init_code_size, init_data_size;
+
+ physpages = get_num_physpages();
+ codesize = _etext - _stext;
+ datasize = _edata - _sdata;
+ rosize = __end_rodata - __start_rodata;
+ bss_size = __bss_stop - __bss_start;
+ init_data_size = __init_end - __init_begin;
+ init_code_size = _einittext - _sinittext;
+
+ /*
+ * Detect special cases and adjust section sizes accordingly:
+ * 1) .init.* may be embedded into .data sections
+ * 2) .init.text.* may be out of [__init_begin, __init_end],
+ * please refer to arch/tile/kernel/vmlinux.lds.S.
+ * 3) .rodata.* may be embedded into .text or .data sections.
+ */
+#define adj_init_size(start, end, size, pos, adj) \
+ do { \
+ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
+ size -= adj; \
+ } while (0)
+
+ adj_init_size(__init_begin, __init_end, init_data_size,
+ _sinittext, init_code_size);
+ adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
+ adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
+ adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
+ adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
+
+#undef adj_init_size
+
+ pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
+#ifdef CONFIG_HIGHMEM
+ ", %luK highmem"
+#endif
+ ")\n",
+ K(nr_free_pages()), K(physpages),
+ codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
+ (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
+ K(physpages - totalram_pages() - totalcma_pages),
+ K(totalcma_pages)
+#ifdef CONFIG_HIGHMEM
+ , K(totalhigh_pages())
+#endif
+ );
+}
+
void __init page_alloc_init_late(void)
{
struct zone *zone;
@@ -2348,6 +2399,7 @@ void __init page_alloc_init_late(void)
files_maxfiles_init();
#endif
Maybe add a comment like
/* Accounting of total+free memory is stable at this point. */
+ mem_init_print_info();
--
Cheers,
David / dhildenb