Allocate memory on the appropriate node for the per-node copies of the
kernel text, and copy the kernel text to that memory. Clean and
invalidate the caches to the point of unification so that the copied
text is correctly visible to the target node.

Signed-off-by: Russell King (Oracle) <rmk+kernel@xxxxxxxxxxxxxxx>
---
 arch/arm64/mm/ktext.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/arm64/mm/ktext.c b/arch/arm64/mm/ktext.c
index 901f159c65e6..4c803b89fcfe 100644
--- a/arch/arm64/mm/ktext.c
+++ b/arch/arm64/mm/ktext.c
@@ -4,14 +4,23 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/numa.h>
 #include <linux/pgtable.h>
+#include <linux/string.h>
 
+#include <asm/cacheflush.h>
 #include <asm/ktext.h>
 #include <asm/memory.h>
 
+static void *kernel_texts[MAX_NUMNODES];
+
+/* Allocate memory for the replicated kernel texts. */
 void __init ktext_replication_init(void)
 {
+	size_t size = _etext - _stext;
 	int kidx = pgd_index((phys_addr_t)KERNEL_START);
+	int nid;
 
 	/*
 	 * If we've messed up and the kernel shares a L0 entry with the
@@ -26,4 +35,16 @@ void __init ktext_replication_init(void)
 		pr_warn("Kernel is located in the same L0 index as vmalloc - text replication disabled\n");
 		return;
 	}
+
+	for_each_node(nid) {
+		/* Nothing to do for node 0 */
+		if (!nid)
+			continue;
+
+		/* Allocate and copy initial kernel text for this node */
+		kernel_texts[nid] = memblock_alloc_node(size, PAGE_SIZE, nid);
+		memcpy(kernel_texts[nid], _stext, size);
+		caches_clean_inval_pou((u64)kernel_texts[nid],
+				       (u64)kernel_texts[nid] + size);
+	}
 }
-- 
2.30.2
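
One caveat worth noting, as a sketch rather than anything taken from the
posted series: memblock_alloc_node() returns NULL on failure, and the
loop above passes the result straight to memcpy(). A defensive variant
of the loop could skip a node whose allocation fails instead of
dereferencing NULL; the pr_warn() message and the skip-the-node policy
below are both hypothetical:

	for_each_node(nid) {
		/* Nothing to do for node 0 - it runs the original text */
		if (!nid)
			continue;

		kernel_texts[nid] = memblock_alloc_node(size, PAGE_SIZE, nid);
		if (!kernel_texts[nid]) {
			/* Hypothetical fallback: leave this node on the
			 * node-0 text rather than crashing at boot.
			 */
			pr_warn("ktext: failed to allocate %zu bytes for node %d\n",
				size, nid);
			continue;
		}

		memcpy(kernel_texts[nid], _stext, size);
		caches_clean_inval_pou((u64)kernel_texts[nid],
				       (u64)kernel_texts[nid] + size);
	}

Skipping the node (rather than returning early) would keep replication
working on the nodes that did get memory; whether that is the right
policy depends on how later patches in the series consume the
kernel_texts[] array.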