If the boot cpu's thread id is not smaller than nr_cpus, extra effort is
required to ensure that the boot cpu ends up in cpu_present_mask. This can
be achieved by reserving the last quota for the boot cpu.

Note: the restriction on nr_cpus will be lifted with more effort in the
next patch.

Signed-off-by: Pingfan Liu <piliu@xxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Mahesh Salgaonkar <mahesh@xxxxxxxxxxxxx>
Cc: Wen Xiong <wenxiong@xxxxxxxxxxxxx>
Cc: Baoquan He <bhe@xxxxxxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: kexec@xxxxxxxxxxxxxxxxxxx
To: linuxppc-dev@xxxxxxxxxxxxxxxx
---
 arch/powerpc/kernel/setup-common.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index a07af8de6674..58a988c64dd2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -456,8 +456,8 @@ struct interrupt_server_node {
 void __init smp_setup_cpu_maps(void)
 {
         struct device_node *dn;
-        int shift = 0, cpu = 0;
-        int j, nthreads = 1;
+        int terminate, shift = 0, cpu = 0;
+        int j, bt_thread = 0, nthreads = 1;
         int len;
         struct interrupt_server_node *intserv_node, *n;
         struct list_head *bt_node, head;
@@ -520,6 +520,7 @@ void __init smp_setup_cpu_maps(void)
                 for (j = 0 ; j < nthreads; j++) {
                         if (be32_to_cpu(intserv[j]) == boot_cpu_hwid) {
                                 bt_node = &intserv_node->node;
+                                bt_thread = j;
                                 found_boot_cpu = true;
                                 /*
                                  * Record the round-shift between dt
@@ -539,11 +540,21 @@ void __init smp_setup_cpu_maps(void)
         /* Select the primary thread, the boot cpu's slibing, as the logic 0 */
         list_add_tail(&head, bt_node);
         pr_info("the round shift between dt seq and the cpu logic number: %d\n", shift);
+        terminate = nr_cpu_ids;
         list_for_each_entry(intserv_node, &head, node) {
+                j = 0;
+                /* Reserve the last quota so that the boot cpu is covered */
+                if (nr_cpu_ids - 1 < bt_thread) {
+                        /*
+                         * The processor core makes assumptions about the
+                         * thread id; do not breach that assumption.
+                         */
+                        terminate = nr_cpu_ids - 1;
+                }
                 avail = intserv_node->avail;
                 nthreads = intserv_node->len / sizeof(int);
-                for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+                for (; j < nthreads && cpu < terminate; j++) {
                         set_cpu_present(cpu, avail);
                         set_cpu_possible(cpu, true);
                         cpu_to_phys_id[cpu] = be32_to_cpu(intserv_node->intserv[j]);
@@ -551,6 +562,14 @@ void __init smp_setup_cpu_maps(void)
                             j, cpu, be32_to_cpu(intserv[j]));
                         cpu++;
                 }
+                /* Make sure the boot cpu is marked present */
+                if (nr_cpu_ids - 1 < bt_thread) {
+                        set_cpu_present(bt_thread, avail);
+                        set_cpu_possible(bt_thread, true);
+                        cpu_to_phys_id[bt_thread] = be32_to_cpu(intserv_node->intserv[bt_thread]);
+                        DBG("    thread %d -> cpu %d (hard id %d)\n",
+                            bt_thread, bt_thread, be32_to_cpu(intserv[bt_thread]));
+                }
         }
 
         list_for_each_entry_safe(intserv_node, n, &head, node) {
-- 
2.31.1
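
For anyone who wants to poke at the reservation logic outside the kernel, here
is a minimal userspace sketch of the control flow in the hunk above. Everything
in it is a mock stand-in: NR_CPU_IDS, NTHREADS, present[], phys_id[], the
intserv[] hard ids and bt_thread = 5 merely imitate nr_cpu_ids, the per-core
thread count, cpu_present_mask and cpu_to_phys_id[]. It is not kernel code and
not part of the patch.

/*
 * Standalone simulation of the enumeration loop: fill logical cpu ids
 * normally, but when the boot thread would fall outside nr_cpu_ids,
 * stop one slot early and mark the boot thread present explicitly.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPU_IDS 2            /* e.g. booted with nr_cpus=2               */
#define NTHREADS   8            /* threads per core on this mock system     */

static bool present[NTHREADS];  /* stand-in for cpu_present_mask            */
static int  phys_id[NTHREADS];  /* stand-in for cpu_to_phys_id[]            */

int main(void)
{
        int intserv[NTHREADS] = { 40, 41, 42, 43, 44, 45, 46, 47 };
        int bt_thread = 5;      /* boot cpu is thread 5 of its core         */
        int terminate = NR_CPU_IDS;
        int cpu = 0, j;

        /* Reserve the last quota when the boot thread exceeds nr_cpu_ids-1 */
        if (NR_CPU_IDS - 1 < bt_thread)
                terminate = NR_CPU_IDS - 1;

        /* Normal enumeration, possibly stopping one logical id early */
        for (j = 0; j < NTHREADS && cpu < terminate; j++) {
                present[cpu] = true;
                phys_id[cpu] = intserv[j];
                printf("thread %d -> cpu %d (hard id %d)\n", j, cpu, intserv[j]);
                cpu++;
        }

        /* Make sure the boot thread itself ends up present */
        if (NR_CPU_IDS - 1 < bt_thread) {
                present[bt_thread] = true;
                phys_id[bt_thread] = intserv[bt_thread];
                printf("thread %d -> cpu %d (hard id %d)\n",
                       bt_thread, bt_thread, intserv[bt_thread]);
        }
        return 0;
}

With these mock values the normal fill stops after logical cpu 0, and the boot
thread is still marked present under its own thread index (5), which mirrors
how the hunk keeps the core's thread-id assumption intact while guaranteeing
the boot cpu a slot in cpu_present_mask.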