Subject: x86/microcode: Replace the all-in-one rendevous handler

From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

commit 0bf871651211b58c7b19f40b746b646d5311e2ec upstream

Replace the all-in-one rendevous handler with a new handler which just
separates the control flow of primary and secondary CPUs.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Link: https://lore.kernel.org/r/20231002115903.433704135@xxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kernel/cpu/microcode/core.c |   51 ++++++-----------------------------
 1 file changed, 9 insertions(+), 42 deletions(-)

--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -268,7 +268,7 @@ struct microcode_ctrl {
 };
 
 static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
-static atomic_t late_cpus_in, late_cpus_out;
+static atomic_t late_cpus_in;
 
 static bool wait_for_cpus(atomic_t *cnt)
 {
@@ -304,7 +304,7 @@ static bool wait_for_ctrl(void)
 	return false;
 }
 
-static __maybe_unused void load_secondary(unsigned int cpu)
+static void load_secondary(unsigned int cpu)
 {
 	unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu);
 	enum ucode_state ret;
@@ -339,7 +339,7 @@ static __maybe_unused void load_secondar
 	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
 }
 
-static __maybe_unused void load_primary(unsigned int cpu)
+static void load_primary(unsigned int cpu)
 {
 	struct cpumask *secondaries = topology_sibling_cpumask(cpu);
 	enum sibling_ctrl ctrl;
@@ -376,46 +376,14 @@ static __maybe_unused void load_primary(
 
 static int load_cpus_stopped(void *unused)
 {
-	int cpu = smp_processor_id();
-	enum ucode_state ret;
-
-	/*
-	 * Wait for all CPUs to arrive. A load will not be attempted unless all
-	 * CPUs show up.
-	 * */
-	if (!wait_for_cpus(&late_cpus_in)) {
-		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
-		return 0;
-	}
-
-	/*
-	 * On an SMT system, it suffices to load the microcode on one sibling of
-	 * the core because the microcode engine is shared between the threads.
-	 * Synchronization still needs to take place so that no concurrent
-	 * loading attempts happen on multiple threads of an SMT core. See
-	 * below.
-	 */
-	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
-		goto wait_for_siblings;
+	unsigned int cpu = smp_processor_id();
 
-	ret = microcode_ops->apply_microcode(cpu);
-	this_cpu_write(ucode_ctrl.result, ret);
-
-wait_for_siblings:
-	if (!wait_for_cpus(&late_cpus_out))
-		panic("Timeout during microcode update!\n");
-
-	/*
-	 * At least one thread has completed update on each core.
-	 * For others, simply call the update to make sure the
-	 * per-cpu cpuinfo can be updated with right microcode
-	 * revision.
-	 */
-	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
-		return 0;
+	if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
+		load_primary(cpu);
+	else
+		load_secondary(cpu);
 
-	ret = microcode_ops->apply_microcode(cpu);
-	this_cpu_write(ucode_ctrl.result, ret);
+	/* No point to wait here. The CPUs will all wait in stop_machine(). */
 	return 0;
 }
 
@@ -429,7 +397,6 @@ static int load_late_stop_cpus(void)
 	pr_err("You should switch to early loading, if possible.\n");
 
 	atomic_set(&late_cpus_in, num_online_cpus());
-	atomic_set(&late_cpus_out, num_online_cpus());
 
 	/*
 	 * Take a snapshot before the microcode update in order to compare and

Patches currently in stable-queue which might be from tglx@xxxxxxxxxxxxx are

queue-6.6/x86-microcode-intel-cleanup-code-further.patch
queue-6.6/x86-microcode-intel-simplify-scan_microcode.patch
queue-6.6/x86-microcode-mop-up-early-loading-leftovers.patch
queue-6.6/x86-microcode-provide-new-control-functions.patch
queue-6.6/x86-microcode-intel-save-the-microcode-only-after-a-successful-late-load.patch
queue-6.6/x86-microcode-32-move-early-loading-after-paging-enable.patch
queue-6.6/x86-microcode-intel-simplify-early-loading.patch
queue-6.6/x86-microcode-add-per-cpu-control-field.patch
queue-6.6/x86-microcode-add-per-cpu-result-state.patch
queue-6.6/x86-microcode-prepare-for-minimal-revision-check.patch
queue-6.6/x86-microcode-intel-simplify-and-rename-generic_load_microcode.patch
queue-6.6/x86-microcode-amd-get-rid-of-the-_load_microcode_amd-forward-declaration.patch
queue-6.6/x86-microcode-handle-offline-cpus-correctly.patch
queue-6.6/x86-microcode-get-rid-of-the-schedule-work-indirection.patch
queue-6.6/x86-microcode-intel-rework-intel_cpu_collect_info.patch
queue-6.6/x86-microcode-amd-use-cached-microcode-for-ap-load.patch
queue-6.6/x86-microcode-replace-the-all-in-one-rendevous-handler.patch
queue-6.6/x86-microcode-amd-add-get_patch_level.patch
queue-6.6/x86-apic-provide-apic_force_nmi_on_cpu.patch
queue-6.6/sched-core-prevent-rescheduling-when-interrupts-are-disabled.patch
queue-6.6/x86-cpu-fix-warm-boot-hang-regression-on-amd-sc1100-.patch
queue-6.6/x86-microcode-intel-rework-intel_find_matching_signature.patch
queue-6.6/x86-microcode-remove-pointless-apply-invocation.patch
queue-6.6/x86-microcode-intel-reuse-intel_cpu_collect_info.patch
queue-6.6/x86-microcode-amd-use-correct-per-cpu-ucode_cpu_info.patch
queue-6.6/x86-microcode-amd-cache-builtin-initrd-microcode-early.patch
queue-6.6/x86-microcode-protect-against-instrumentation.patch
queue-6.6/x86-microcode-clean-up-mc_cpu_down_prep.patch
queue-6.6/x86-microcode-amd-cache-builtin-microcode-too.patch
queue-6.6/x86-microcode-amd-merge-early_apply_microcode-into-its-single-callsite.patch
queue-6.6/x86-microcode-rendezvous-and-load-in-nmi.patch
queue-6.6/x86-microcode-intel-rip-out-mixed-stepping-support-for-intel-cpus.patch
queue-6.6/x86-microcode-intel-unify-microcode-apply-functions.patch
queue-6.6/intel_idle-handle-older-cpus-which-stop-the-tsc-in-deeper-c-states-correctly.patch
queue-6.6/x86-microcode-intel-switch-to-kvmalloc.patch
queue-6.6/x86-microcode-clarify-the-late-load-logic.patch
queue-6.6/x86-microcode-sanitize-__wait_for_cpus.patch
queue-6.6/x86-microcode-rework-early-revisions-reporting.patch
queue-6.6/rcuref-plug-slowpath-race-in-rcuref_put.patch
queue-6.6/x86-microcode-handle-nosmt-correctly.patch
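For readers following the control flow change outside the kernel tree, below
is a rough user-space analogue of the dispatch this patch introduces: each
"CPU" checks whether it is the control CPU of its core and branches to the
primary or the secondary path, instead of one all-in-one handler doing both
jobs with goto-based flow. This is a minimal sketch only; all names here
(fake_cpu, core_done, apply_microcode() as a stand-in for
microcode_ops->apply_microcode(), the pthread scaffolding) are invented for
illustration and are not kernel APIs.

/*
 * Illustrative user-space sketch, NOT kernel code.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS			4
#define THREADS_PER_CORE	2

struct fake_cpu {
	unsigned int	id;
	unsigned int	ctrl_cpu;	/* designated primary of this "core" */
	int		result;
};

static struct fake_cpu cpus[NR_CPUS];
static atomic_int core_done[NR_CPUS / THREADS_PER_CORE];

/* Stand-in for microcode_ops->apply_microcode(); invented for the sketch. */
static int apply_microcode(unsigned int cpu)
{
	printf("cpu %u: applying update\n", cpu);
	return 0;
}

/* Primary path: do the real work, then publish completion to the siblings. */
static void load_primary(struct fake_cpu *c)
{
	c->result = apply_microcode(c->id);
	atomic_store(&core_done[c->id / THREADS_PER_CORE], 1);
}

/* Secondary path: wait for the primary, then adopt its result. */
static void load_secondary(struct fake_cpu *c)
{
	while (!atomic_load(&core_done[c->id / THREADS_PER_CORE]))
		;	/* spin, like wait_for_ctrl() polls the ctrl field */
	c->result = cpus[c->ctrl_cpu].result;
}

/* Analogue of the new load_cpus_stopped(): a plain primary/secondary branch. */
static void *load_cpus_stopped(void *arg)
{
	struct fake_cpu *c = arg;

	if (c->ctrl_cpu == c->id)
		load_primary(c);
	else
		load_secondary(c);
	return NULL;
}

int main(void)
{
	pthread_t tids[NR_CPUS];

	for (unsigned int i = 0; i < NR_CPUS; i++) {
		cpus[i].id = i;
		/* First thread of each "core" acts as the control CPU. */
		cpus[i].ctrl_cpu = i - (i % THREADS_PER_CORE);
		pthread_create(&tids[i], NULL, load_cpus_stopped, &cpus[i]);
	}
	for (unsigned int i = 0; i < NR_CPUS; i++)
		pthread_join(tids[i], NULL);

	for (unsigned int i = 0; i < NR_CPUS; i++)
		printf("cpu %u: result %d\n", i, cpus[i].result);
	return 0;
}

The shape is the point: the single branch on the control-CPU check replaces
the old wait_for_siblings/goto logic, and all result bookkeeping stays per
CPU, as in the ucode_ctrl.result field of the real code.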