On Thu, May 25 2023 at 01:56, Michael Ellerman wrote:
>  #ifdef CONFIG_HOTPLUG_SMT
>  enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
> +static unsigned int cpu_smt_max_threads __ro_after_init;
> +unsigned int cpu_smt_num_threads;

Why does this need to be global? cpu_smt_control is pointlessly global already.

>  void __init cpu_smt_disable(bool force)
>  {
> @@ -433,10 +435,18 @@ void __init cpu_smt_disable(bool force)
>   * The decision whether SMT is supported can only be done after the full
>   * CPU identification. Called from architecture code.
>   */
> -void __init cpu_smt_check_topology(void)
> +void __init cpu_smt_check_topology(unsigned int num_threads)
>  {
>  	if (!topology_smt_supported())
>  		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
> +
> +	cpu_smt_max_threads = num_threads;
> +
> +	// May already be disabled by nosmt command line parameter
> +	if (cpu_smt_control != CPU_SMT_ENABLED)
> +		cpu_smt_num_threads = 1;
> +	else
> +		cpu_smt_num_threads = num_threads;

Taking Laurent's findings into account, this should be something like the
incomplete sketch below. x86 would simply invoke cpu_smt_set_num_threads()
with both arguments as smp_num_siblings, while PPC can funnel its command
line parameter through the num_threads argument.

Thanks,

        tglx

---
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -414,6 +414,8 @@ void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+static unsigned int cpu_smt_max_threads __ro_after_init;
+static unsigned int cpu_smt_num_threads = UINT_MAX;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -427,24 +429,31 @@ void __init cpu_smt_disable(bool force)
 		pr_info("SMT: disabled\n");
 		cpu_smt_control = CPU_SMT_DISABLED;
 	}
+	cpu_smt_num_threads = 1;
 }
 
 /*
  * The decision whether SMT is supported can only be done after the full
  * CPU identification. Called from architecture code.
  */
-void __init cpu_smt_check_topology(void)
+void __init cpu_smt_set_num_threads(unsigned int max_threads, unsigned int num_threads)
 {
-	if (!topology_smt_supported())
+	if (max_threads == 1)
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
 
-static int __init smt_cmdline_disable(char *str)
-{
-	cpu_smt_disable(str && !strcmp(str, "force"));
-	return 0;
+	cpu_smt_max_threads = max_threads;
+
+	/*
+	 * If SMT has been disabled via the kernel command line or SMT is
+	 * not supported, set cpu_smt_num_threads to 1 for consistency.
+	 * If enabled, take the architecture requested number of threads
+	 * to bring up into account.
+	 */
+	if (cpu_smt_control != CPU_SMT_ENABLED)
+		cpu_smt_num_threads = 1;
+	else if (num_threads < cpu_smt_num_threads)
+		cpu_smt_num_threads = num_threads;
 }
-early_param("nosmt", smt_cmdline_disable);
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
@@ -463,6 +472,13 @@ static inline bool cpu_smt_allowed(unsig
 	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 }
 
+static int __init smt_cmdline_disable(char *str)
+{
+	cpu_smt_disable(str && !strcmp(str, "force"));
+	return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
 /* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
 bool cpu_smt_possible(void)
 {
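
For illustration, the intended call sites would look roughly like the sketch
below. cpu_smt_set_num_threads() and smp_num_siblings are taken from the
description above; the function names and the use of threads_per_core on the
powerpc side are assumptions for the example only, not part of the patch.

/* Sketch only: example call sites, not part of the proposed diff. */
extern void cpu_smt_set_num_threads(unsigned int max_threads,
				    unsigned int num_threads);

/* x86 (hypothetical helper): no separate request, so both arguments
 * are smp_num_siblings. */
static void __init x86_smt_init_example(void)
{
	cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
}

/* powerpc (hypothetical helper): funnel the parsed command line value
 * through num_threads; threads_per_core stands in for the hardware
 * maximum here as an assumption. */
static void __init ppc_smt_init_example(unsigned int cmdline_threads)
{
	cpu_smt_set_num_threads(threads_per_core, cmdline_threads);
}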