This reverts commit f2a524d9ef5b2cf9b06c4a59374bb5efbf697ba0.

With the recent changes made to initialize the cma regions before the
page tables are set up, commit f2a524d9ef5b ("of: reserved_mem:
Restructure code to call reserved mem init functions earlier"), an
issue was introduced where the initialization of the cma regions fails
and the regions end up as "non-reusable" instead of "reusable". This
issue occurs because the device_nodes of the regions have not yet been
created by the time the cma regions are initialized.

The cma regions need to be initialized before the page tables are set
up for them to be configured correctly, as was realized in [1]. Hence,
since the unflatten_devicetree APIs are not available until after the
page tables have been set up, revert to using the fdt APIs. This makes
it possible to store a reference to each cma node in the reserved_mem
array by the time it is needed in the region's init function.

Since this fix is based on the changes that were made to use the
unflatten_devicetree APIs, revert it. The fix will be reapplied with
the relevant changes needed to make use of the fdt APIs instead.

[1] https://lore.kernel.org/all/20240610213403.GA1697364@thelio-3990X/

Signed-off-by: Oreoluwa Babatunde <quic_obabatun@xxxxxxxxxxx>
---
 drivers/of/fdt.c             |  2 +-
 drivers/of/of_private.h      |  2 +-
 drivers/of/of_reserved_mem.c | 83 +++++++++++++++---------------------
 3 files changed, 37 insertions(+), 50 deletions(-)

diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ea2dff0478c7..9cde2abd2fc0 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1239,7 +1239,7 @@ void __init unflatten_device_tree(void)
 	unittest_unflatten_overlay_base();
 
 	/* initialize the reserved memory regions */
-	of_scan_reserved_mem_reg_nodes();
+	of_init_reserved_mem();
 }
 
 /**
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 7412aed903df..01b33c4b1e9f 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -181,7 +181,7 @@ static inline struct device_node *__of_get_dma_parent(const struct device_node *
 #endif
 
 int fdt_scan_reserved_mem(void);
-void of_scan_reserved_mem_reg_nodes(void);
+void of_init_reserved_mem(void);
 
 bool of_fdt_device_is_available(const void *blob, unsigned long node);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index b31001728866..eb54490a0a11 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -97,8 +97,6 @@ static void __init alloc_reserved_mem_array(void)
 	reserved_mem = new_array;
 }
 
-static void __init of_init_reserved_mem_node(struct reserved_mem *rmem);
-
 /*
  * of_reserved_mem_save_node() - save fdt node for second pass initialization
  */
@@ -117,12 +115,6 @@ static void __init of_reserved_mem_save_node(struct device_node *node, const cha
 	rmem->base = base;
 	rmem->size = size;
 
-	/*
-	 * Run the region specific initialization function for the rmem
-	 * node.
-	 */
-	of_init_reserved_mem_node(rmem);
-
 	reserved_mem_count++;
 	return;
 }
@@ -209,8 +201,6 @@ static int __init __fdt_reserved_mem_check_root(unsigned long node)
 	return 0;
 }
 
-static void __init __rmem_check_for_overlap(void);
-
 /**
  * of_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
  * reserved memory regions.
@@ -221,7 +211,7 @@ static void __init __rmem_check_for_overlap(void);
  * size are all stored in the reserved_mem array by calling the
  * of_reserved_mem_save_node() function.
  */
-void __init of_scan_reserved_mem_reg_nodes(void)
+static void __init of_scan_reserved_mem_reg_nodes(void)
 {
 	struct device_node *node, *child;
 	phys_addr_t base, size;
@@ -232,13 +222,6 @@ void __init of_scan_reserved_mem_reg_nodes(void)
 		return;
 	}
 
-	/*
-	 * Before moving forward, allocate the exact size needed for the
-	 * reserved_mem array and copy all previously saved contents
-	 * into the new array if successful.
-	 */
-	alloc_reserved_mem_array();
-
 	for_each_child_of_node(node, child) {
 		int ret = 0;
 		const char *uname;
@@ -263,8 +246,6 @@ void __init of_scan_reserved_mem_reg_nodes(void)
 		if (size)
 			of_reserved_mem_save_node(child, uname, base, size);
 	}
-	/* check for overlapping reserved regions */
-	__rmem_check_for_overlap();
 }
 
 static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
@@ -545,38 +526,44 @@ static void __init __rmem_check_for_overlap(void)
 }
 
 /**
- * of_init_reserved_mem_node() - Initialize a saved reserved memory region.
- * @rmem: reserved_mem object of the memory region to be initialized.
- *
- * This function is used to call the region specific initialization
- * function on the rmem object passed as an argument. The rmem object
- * will contain the base address, size, node name, and device_node of
- * the reserved memory region to be initialized.
+ * of_init_reserved_mem() - allocate and init all saved reserved memory regions
  */
-static void __init of_init_reserved_mem_node(struct reserved_mem *rmem)
+void __init of_init_reserved_mem(void)
 {
-	int err;
-	bool nomap;
-	struct device_node *node = rmem->dev_node;
+	int i;
+
+	alloc_reserved_mem_array();
 
-	nomap = of_property_present(node, "no-map");
+	of_scan_reserved_mem_reg_nodes();
 
-	err = __reserved_mem_init_node(rmem);
-	if (err != 0 && err != -ENOENT) {
-		pr_info("node %s compatible matching fail\n", rmem->name);
-		if (nomap)
-			memblock_clear_nomap(rmem->base, rmem->size);
-		else
-			memblock_phys_free(rmem->base, rmem->size);
-	} else {
-		phys_addr_t end = rmem->base + rmem->size - 1;
-		bool reusable = of_property_present(node, "reusable");
-
-		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
-			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
-			nomap ? "nomap" : "map",
-			reusable ? "reusable" : "non-reusable",
-			rmem->name ? rmem->name : "unknown");
+	/* check for overlapping reserved regions */
+	__rmem_check_for_overlap();
+
+	for (i = 0; i < reserved_mem_count; i++) {
+		struct reserved_mem *rmem = &reserved_mem[i];
+		struct device_node *node = rmem->dev_node;
+		int err = 0;
+		bool nomap;
+
+		nomap = of_property_present(node, "no-map");
+
+		err = __reserved_mem_init_node(rmem);
+		if (err != 0 && err != -ENOENT) {
+			pr_info("node %s compatible matching fail\n", rmem->name);
+			if (nomap)
+				memblock_clear_nomap(rmem->base, rmem->size);
+			else
+				memblock_phys_free(rmem->base, rmem->size);
+		} else {
+			phys_addr_t end = rmem->base + rmem->size - 1;
+			bool reusable = of_property_present(node, "reusable");
+
+			pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
+				&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
+				nomap ? "nomap" : "map",
+				reusable ? "reusable" : "non-reusable",
+				rmem->name ? rmem->name : "unknown");
+		}
 	}
 }
-- 
2.34.1