Hi Waiman, Thanks for the review! On 04/03/25 10:05, Waiman Long wrote: > On 3/4/25 3:40 AM, Juri Lelli wrote: > > Create wrappers for sched_domains_mutex so that it can transparently be > > used on both CONFIG_SMP and !CONFIG_SMP, as some functions will need to > > do. > > > > Reported-by: Jon Hunter <jonathanh@xxxxxxxxxx> > > Fixes: 53916d5fd3c0 ("sched/deadline: Check bandwidth overflow earlier for hotplug") > > Signed-off-by: Juri Lelli <juri.lelli@xxxxxxxxxx> > > --- > > include/linux/sched.h | 2 ++ > > kernel/cgroup/cpuset.c | 4 ++-- > > kernel/sched/core.c | 4 ++-- > > kernel/sched/debug.c | 8 ++++---- > > kernel/sched/topology.c | 17 +++++++++++++++-- > > 5 files changed, 25 insertions(+), 10 deletions(-) > > > > diff --git a/include/linux/sched.h b/include/linux/sched.h > > index 9632e3318e0d..d5f8c161d852 100644 > > --- a/include/linux/sched.h > > +++ b/include/linux/sched.h > > @@ -383,6 +383,8 @@ enum uclamp_id { > > extern struct root_domain def_root_domain; > > extern struct mutex sched_domains_mutex; > > #endif > > +extern void sched_domains_mutex_lock(void); > > +extern void sched_domains_mutex_unlock(void); > > If all access to sched_domains_mutex is through the wrappers, we may not > need to expose sched_domains_mutex at all. Also it is more efficient for the > non-SMP case to put the wrappers inside the CONFIG_SMP block and define the > empty inline functions in the else part. 
> > > > struct sched_param { > > int sched_priority; > > diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c > > index 0f910c828973..f87526edb2a4 100644 > > --- a/kernel/cgroup/cpuset.c > > +++ b/kernel/cgroup/cpuset.c > > @@ -994,10 +994,10 @@ static void > > partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], > > struct sched_domain_attr *dattr_new) > > { > > - mutex_lock(&sched_domains_mutex); > > + sched_domains_mutex_lock(); > > partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); > > dl_rebuild_rd_accounting(); > > - mutex_unlock(&sched_domains_mutex); > > + sched_domains_mutex_unlock(); > > } > > /* > > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > > index 9aecd914ac69..7b14500d731b 100644 > > --- a/kernel/sched/core.c > > +++ b/kernel/sched/core.c > > @@ -8424,9 +8424,9 @@ void __init sched_init_smp(void) > > * CPU masks are stable and all blatant races in the below code cannot > > * happen. > > */ > > - mutex_lock(&sched_domains_mutex); > > + sched_domains_mutex_lock(); > > sched_init_domains(cpu_active_mask); > > - mutex_unlock(&sched_domains_mutex); > > + sched_domains_mutex_unlock(); > > /* Move init over to a non-isolated CPU */ > > if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) > > diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c > > index ef047add7f9e..a0893a483d35 100644 > > --- a/kernel/sched/debug.c > > +++ b/kernel/sched/debug.c > > @@ -292,7 +292,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf, > > bool orig; > > cpus_read_lock(); > > - mutex_lock(&sched_domains_mutex); > > + sched_domains_mutex_lock(); > > orig = sched_debug_verbose; > > result = debugfs_write_file_bool(filp, ubuf, cnt, ppos); > > @@ -304,7 +304,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf, > > sd_dentry = NULL; > > } > > - mutex_unlock(&sched_domains_mutex); > > + sched_domains_mutex_unlock(); > > 
cpus_read_unlock(); > > return result; > > @@ -515,9 +515,9 @@ static __init int sched_init_debug(void) > > debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); > > debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate); > > - mutex_lock(&sched_domains_mutex); > > + sched_domains_mutex_lock(); > > update_sched_domain_debugfs(); > > - mutex_unlock(&sched_domains_mutex); > > + sched_domains_mutex_unlock(); > > #endif > > #ifdef CONFIG_NUMA_BALANCING > > diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c > > index c49aea8c1025..e2b879ec9458 100644 > > --- a/kernel/sched/topology.c > > +++ b/kernel/sched/topology.c > > @@ -6,6 +6,19 @@ > > #include <linux/bsearch.h> > > DEFINE_MUTEX(sched_domains_mutex); > > +#ifdef CONFIG_SMP > > +void sched_domains_mutex_lock(void) > > +{ > > + mutex_lock(&sched_domains_mutex); > > +} > > +void sched_domains_mutex_unlock(void) > > +{ > > + mutex_unlock(&sched_domains_mutex); > > +} > > +#else > > +void sched_domains_mutex_lock(void) { } > > +void sched_domains_mutex_unlock(void) { } > > +#endif > > /* Protected by sched_domains_mutex: */ > > static cpumask_var_t sched_domains_tmpmask; > > @@ -2791,7 +2804,7 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], > > void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], > > struct sched_domain_attr *dattr_new) > > { > > - mutex_lock(&sched_domains_mutex); > > + sched_domains_mutex_lock(); > > partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); > > - mutex_unlock(&sched_domains_mutex); > > + sched_domains_mutex_unlock(); > > } > > There are two "lockdep_assert_held(&sched_domains_mutex);" statements in > topology.c file and one in cpuset.c. That can be problematic in the non-SMP > case. Maybe another wrapper to do the assert? Yes, makes sense. Will modify as you suggest here and above. Best, Juri