Introduce infrastructure function housekeeping_update() to change
housekeeping_cpumask during runtime and adjust affinities of
dependent subsystems. Affinity adjustments of subsystems follow
in subsequent patches.

Parent patch: "sched/isolation: Exclude dynamically isolated CPUs
from housekeeping masks"
https://lore.kernel.org/lkml/20240229021414.508972-2-longman@xxxxxxxxxx/

Test example for cgroup2:

cd /sys/fs/cgroup/
echo +cpuset > cgroup.subtree_control
mkdir test
echo isolated > test/cpuset.cpus.partition
echo $isolate > test/cpuset.cpus

Signed-off-by: Costa Shulyupin <costa.shul@xxxxxxxxxx>
---
 kernel/sched/isolation.c | 48 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 43 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 948b9ee0dc2cc..036e48f0e7d1b 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -116,6 +116,39 @@ static void __init housekeeping_setup_type(enum hk_type type,
 			     housekeeping_staging);
 }
 
+/*
+ * housekeeping_update - change housekeeping.cpumasks[type] and propagate the
+ * change.
+ *
+ * Assuming cpuset_mutex is held in sched_partition_write or
+ * cpuset_write_resmask.
+ */
+static int housekeeping_update(enum hk_type type, cpumask_var_t update)
+{
+	struct {
+		struct cpumask changed;
+		struct cpumask enable;
+		struct cpumask disable;
+	} *masks;
+
+	masks = kmalloc(sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		return -ENOMEM;
+
+	lockdep_assert_cpus_held();
+	cpumask_xor(&masks->changed, housekeeping_cpumask(type), update);
+	cpumask_and(&masks->enable, &masks->changed, update);
+	cpumask_andnot(&masks->disable, &masks->changed, update);
+	cpumask_copy(housekeeping.cpumasks[type], update);
+	housekeeping.flags |= BIT(type);
+	if (!static_branch_unlikely(&housekeeping_overridden))
+		static_key_enable_cpuslocked(&housekeeping_overridden.key);
+
+	kfree(masks);
+
+	return 0;
+}
+
 static int __init housekeeping_setup(char *str, unsigned long flags)
 {
 	cpumask_var_t non_housekeeping_mask, housekeeping_staging;
@@ -314,9 +347,12 @@ int housekeeping_exlude_isolcpus(const struct cpumask *isolcpus, unsigned long f
 		/*
 		 * Reset housekeeping to bootup default
 		 */
-		for_each_set_bit(type, &housekeeping_boot.flags, HK_TYPE_MAX)
-			cpumask_copy(housekeeping.cpumasks[type],
-				     housekeeping_boot.cpumasks[type]);
+		for_each_set_bit(type, &housekeeping_boot.flags, HK_TYPE_MAX) {
+			int err = housekeeping_update(type, housekeeping_boot.cpumasks[type]);
+
+			if (err)
+				return err;
+		}
 
 		WRITE_ONCE(housekeeping.flags, housekeeping_boot.flags);
 		if (!housekeeping_boot.flags &&
@@ -344,9 +380,11 @@ int housekeeping_exlude_isolcpus(const struct cpumask *isolcpus, unsigned long f
 		cpumask_andnot(tmp_mask, src_mask, isolcpus);
 		if (!cpumask_intersects(tmp_mask, cpu_online_mask))
 			return -EINVAL;	/* Invalid isolated CPUs */
-		cpumask_copy(housekeeping.cpumasks[type], tmp_mask);
+		int err = housekeeping_update(type, tmp_mask);
+
+		if (err)
+			return err;
 	}
-	WRITE_ONCE(housekeeping.flags, housekeeping_boot.flags | flags);
 	excluded = true;
 	if (!static_branch_unlikely(&housekeeping_overridden))
 		static_key_enable_cpuslocked(&housekeeping_overridden.key);
-- 
2.45.0
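
Note (not part of the patch): below is a minimal userspace sketch of the
changed/enable/disable computation that housekeeping_update() performs,
modelled with plain unsigned long bitmasks instead of struct cpumask; the
CPU sets used here are assumptions chosen purely for illustration.

/*
 * Illustration only: the mask arithmetic of housekeeping_update(),
 * using ordinary bitmask operators in place of the cpumask helpers.
 * The example CPU sets are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long old_mask = 0x0f;	/* current housekeeping CPUs 0-3   */
	unsigned long new_mask = 0x3c;	/* requested housekeeping CPUs 2-5 */

	unsigned long changed = old_mask ^ new_mask;	/* cpumask_xor()    */
	unsigned long enable  = changed & new_mask;	/* cpumask_and()    */
	unsigned long disable = changed & ~new_mask;	/* cpumask_andnot() */

	/* enable: CPUs 4-5 gain housekeeping duty; disable: CPUs 0-1 lose it */
	printf("changed=%#lx enable=%#lx disable=%#lx\n",
	       changed, enable, disable);
	return 0;
}

The enable/disable split is what the subsequent patches are expected to use
when re-pinning dependent subsystems onto, or away from, the affected CPUs.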