The patch titled
     Subject: memblock: add KHO support for reserve_mem
has been added to the -mm mm-nonmm-unstable branch.  Its filename is
     memblock-add-kho-support-for-reserve_mem.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/memblock-add-kho-support-for-reserve_mem.patch

This patch will later appear in the mm-nonmm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Alexander Graf <graf@xxxxxxxxxx>
Subject: memblock: add KHO support for reserve_mem
Date: Thu, 6 Feb 2025 15:27:53 +0200

Linux has recently gained support for "reserve_mem": a mechanism to
allocate a region of memory early enough in boot that we can cross our
fingers and hope it stays at the same location during most boots, so we
can, for example, store ftrace buffers in it.
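For instance, a command line like the following (the size, alignment and
name are illustrative; the ramoops hookup mirrors the reserve_mem
example in Documentation/admin-guide/kernel-parameters.txt) asks for a
12M region named "oops" and points ramoops at it:

	reserve_mem=12M:4096:oops ramoops.mem_name=oops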
Peter Anvin" <hpa@xxxxxxxxx> Cc: Ingo Molnar <mingo@xxxxxxxxxx> Cc: James Gowans <jgowans@xxxxxxxxxx> Cc: Jonathan Corbet <corbet@xxxxxxx> Cc: Krzysztof Kozlowski <krzk@xxxxxxxxxx> Cc: Mark Rutland <mark.rutland@xxxxxxx> Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx> Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx> Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx> Cc: Pratyush Yadav <ptyadav@xxxxxxxxx> Cc: Rob Herring <robh+dt@xxxxxxxxxx> Cc: Rob Herring <robh@xxxxxxxxxx> Cc: Saravana Kannan <saravanak@xxxxxxxxxx> Cc: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx> Cc: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Cc: Tom Lendacky <thomas.lendacky@xxxxxxx> Cc: Usama Arif <usama.arif@xxxxxxxxxxxxx> Cc: Will Deacon <will@xxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- mm/memblock.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) --- a/mm/memblock.c~memblock-add-kho-support-for-reserve_mem +++ a/mm/memblock.c @@ -16,6 +16,9 @@ #include <linux/kmemleak.h> #include <linux/seq_file.h> #include <linux/memblock.h> +#include <linux/kexec_handover.h> +#include <linux/kexec.h> +#include <linux/libfdt.h> #include <asm/sections.h> #include <linux/io.h> @@ -2423,6 +2426,70 @@ int reserve_mem_find_by_name(const char } EXPORT_SYMBOL_GPL(reserve_mem_find_by_name); +static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size, + phys_addr_t align) +{ + const void *fdt = kho_get_fdt(); + const char *path = "/reserve_mem"; + int node, child, err; + + if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER)) + return false; + + if (!fdt) + return false; + + node = fdt_path_offset(fdt, "/reserve_mem"); + if (node < 0) + return false; + + err = fdt_node_check_compatible(fdt, node, "reserve_mem-v1"); + if (err) { + pr_warn("Node '%s' has unknown compatible", path); + return false; + } + + fdt_for_each_subnode(child, fdt, node) { + const struct kho_mem *mem; + const char *child_name; + int len; + + /* Search for old kernel's reserved_mem with the same name */ + child_name = fdt_get_name(fdt, child, NULL); + if (strcmp(name, child_name)) + continue; + + err = fdt_node_check_compatible(fdt, child, "reserve_mem_map-v1"); + if (err) { + pr_warn("Node '%s/%s' has unknown compatible", path, name); + continue; + } + + mem = fdt_getprop(fdt, child, "mem", &len); + if (!mem || len != sizeof(*mem)) + continue; + + if (mem->addr & (align - 1)) { + pr_warn("KHO reserved_mem '%s' has wrong alignment (0x%lx, 0x%lx)", + name, (long)align, (long)mem->addr); + continue; + } + + if (mem->size != size) { + pr_warn("KHO reserved_mem '%s' has wrong size (0x%lx != 0x%lx)", + name, (long)mem->size, (long)size); + continue; + } + + reserved_mem_add(mem->addr, mem->size, name); + pr_info("Revived memory reservation '%s' from KHO", name); + + return true; + } + + return false; +} + /* * Parse reserve_mem=nn:align:name */ @@ -2478,6 +2545,11 @@ static int __init reserve_mem(char *p) if (reserve_mem_find_by_name(name, &start, &tmp)) return -EBUSY; + /* Pick previous allocations up from KHO if available */ + if (reserve_mem_kho_revive(name, size, align)) + return 1; + + /* TODO: Allocation must be outside of scratch region */ start = memblock_phys_alloc(size, align); if (!start) return -ENOMEM; @@ -2488,6 +2560,65 @@ static int __init reserve_mem(char *p) } __setup("reserve_mem=", reserve_mem); +static int reserve_mem_kho_write_map(void *fdt, struct reserve_mem_table *map) +{ + int err = 0; + const char compatible[] = "reserve_mem_map-v1"; 
Link: https://lkml.kernel.org/r/20250206132754.2596694-14-rppt@xxxxxxxxxx
Signed-off-by: Alexander Graf <graf@xxxxxxxxxx>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Anthony Yznaga <anthony.yznaga@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Ashish Kalra <ashish.kalra@xxxxxxx>
Cc: Ben Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>
Cc: Eric Biederman <ebiederm@xxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: James Gowans <jgowans@xxxxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Krzysztof Kozlowski <krzk@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Pratyush Yadav <ptyadav@xxxxxxxxx>
Cc: Rob Herring <robh+dt@xxxxxxxxxx>
Cc: Rob Herring <robh@xxxxxxxxxx>
Cc: Saravana Kannan <saravanak@xxxxxxxxxx>
Cc: Stanislav Kinsburskii <skinsburskii@xxxxxxxxxxxxxxxxxxx>
Cc: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
Cc: Usama Arif <usama.arif@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/memblock.c |  131 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 131 insertions(+)

--- a/mm/memblock.c~memblock-add-kho-support-for-reserve_mem
+++ a/mm/memblock.c
@@ -16,6 +16,9 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/kexec_handover.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>

 #include <asm/sections.h>
 #include <linux/io.h>
@@ -2423,6 +2426,70 @@ int reserve_mem_find_by_name(const char
 }
 EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);

+static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
+					  phys_addr_t align)
+{
+	const void *fdt = kho_get_fdt();
+	const char *path = "/reserve_mem";
+	int node, child, err;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+		return false;
+
+	if (!fdt)
+		return false;
+
+	node = fdt_path_offset(fdt, path);
+	if (node < 0)
+		return false;
+
+	err = fdt_node_check_compatible(fdt, node, "reserve_mem-v1");
+	if (err) {
+		pr_warn("Node '%s' has unknown compatible\n", path);
+		return false;
+	}
+
+	fdt_for_each_subnode(child, fdt, node) {
+		const struct kho_mem *mem;
+		const char *child_name;
+		int len;
+
+		/* Search for old kernel's reserved_mem with the same name */
+		child_name = fdt_get_name(fdt, child, NULL);
+		if (strcmp(name, child_name))
+			continue;
+
+		err = fdt_node_check_compatible(fdt, child, "reserve_mem_map-v1");
+		if (err) {
+			pr_warn("Node '%s/%s' has unknown compatible\n", path, name);
+			continue;
+		}
+
+		mem = fdt_getprop(fdt, child, "mem", &len);
+		if (!mem || len != sizeof(*mem))
+			continue;
+
+		if (mem->addr & (align - 1)) {
+			pr_warn("KHO reserved_mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
+				name, (long)align, (long)mem->addr);
+			continue;
+		}
+
+		if (mem->size != size) {
+			pr_warn("KHO reserved_mem '%s' has wrong size (0x%lx != 0x%lx)\n",
+				name, (long)mem->size, (long)size);
+			continue;
+		}
+
+		reserved_mem_add(mem->addr, mem->size, name);
+		pr_info("Revived memory reservation '%s' from KHO\n", name);
+
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * Parse reserve_mem=nn:align:name
  */
@@ -2478,6 +2545,11 @@ static int __init reserve_mem(char *p)
 	if (reserve_mem_find_by_name(name, &start, &tmp))
 		return -EBUSY;

+	/* Pick previous allocations up from KHO if available */
+	if (reserve_mem_kho_revive(name, size, align))
+		return 1;
+
+	/* TODO: Allocation must be outside of scratch region */
 	start = memblock_phys_alloc(size, align);
 	if (!start)
 		return -ENOMEM;
@@ -2488,6 +2560,65 @@ static int __init reserve_mem(char *p)
 }
 __setup("reserve_mem=", reserve_mem);

+static int reserve_mem_kho_write_map(void *fdt, struct reserve_mem_table *map)
+{
+	int err = 0;
+	const char compatible[] = "reserve_mem_map-v1";
+	struct kho_mem mem = {
+		.addr = map->start,
+		.size = map->size,
+	};
+
+	err |= fdt_begin_node(fdt, map->name);
+	err |= fdt_property(fdt, "compatible", compatible, sizeof(compatible));
+	err |= fdt_property(fdt, "mem", &mem, sizeof(mem));
+	err |= fdt_end_node(fdt);
+
+	return err;
+}
+
+static int reserve_mem_kho_notifier(struct notifier_block *self,
+				    unsigned long cmd, void *v)
+{
+	const char compatible[] = "reserve_mem-v1";
+	void *fdt = v;
+	int err = 0;
+	int i;
+
+	switch (cmd) {
+	case KEXEC_KHO_ABORT:
+		return NOTIFY_DONE;
+	case KEXEC_KHO_DUMP:
+		/* Handled below */
+		break;
+	default:
+		return NOTIFY_BAD;
+	}
+
+	if (!reserved_mem_count)
+		return NOTIFY_DONE;
+
+	err |= fdt_begin_node(fdt, "reserve_mem");
+	err |= fdt_property(fdt, "compatible", compatible, sizeof(compatible));
+	for (i = 0; i < reserved_mem_count; i++)
+		err |= reserve_mem_kho_write_map(fdt, &reserved_mem_table[i]);
+	err |= fdt_end_node(fdt);
+
+	return err ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+static struct notifier_block reserve_mem_kho_nb = {
+	.notifier_call = reserve_mem_kho_notifier,
+};
+
+static int __init reserve_mem_init(void)
+{
+	register_kho_notifier(&reserve_mem_kho_nb);
+
+	return 0;
+}
+core_initcall(reserve_mem_init);
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
 static const char * const flagname[] = {
 	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
_

Patches currently in -mm which might be from graf@xxxxxxxxxx are

memblock-add-support-for-scratch-memory.patch
kexec-add-kexec-handover-kho-generation-helpers.patch
kexec-add-kho-parsing-support.patch
kexec-add-kho-support-to-kexec-file-loads.patch
kexec-add-config-option-for-kho.patch
kexec-add-documentation-for-kho.patch
arm64-add-kho-support.patch
x86-add-kho-support.patch
memblock-add-kho-support-for-reserve_mem.patch