Call the newly introduced housekeeping_exlude_isolcpus() function to exclude isolated CPUs from the selected housekeeping CPU masks. This is in addition to the exclusion of isolated CPUs from the workqueue unbound CPU mask. Almost all the existing housekeeping cpumasks can be referenced at run time. Right now all of them except HK_TYPE_TICK and HK_TYPE_MANAGED_IRQ will be updated in the creation, deletion and modification of isolated partitions. More investigation will be done on the other two types. Signed-off-by: Waiman Long <longman@xxxxxxxxxx> Acked-by: Tejun Heo <tj@xxxxxxxxxx> --- kernel/cgroup/cpuset.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 8b40df89c3c1..d3cf4b2e44c7 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -233,6 +233,15 @@ static bool have_boot_isolcpus; /* List of remote partition root children */ static struct list_head remote_children; +/* + * The following sets of housekeeping cpumasks can be referenced at run time + * and hence should be updated for CPU isolation. + */ +#define HOUSEKEEPING_FLAGS (BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU) |\ + BIT(HK_TYPE_SCHED) | BIT(HK_TYPE_MISC) |\ + BIT(HK_TYPE_DOMAIN) | BIT(HK_TYPE_WQ) |\ + BIT(HK_TYPE_KTHREAD)) + /* * A flag to force sched domain rebuild at the end of an operation while * inhibiting it in the intermediate stages when set. 
Currently it is only @@ -1588,7 +1597,15 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent, return isolcpus_updated; } -static void update_unbound_workqueue_cpumask(bool isolcpus_updated) +/** + * update_isolation_cpumasks - Update external isolation CPU masks + * @isolcpus_updated: %true if isolation CPU masks update needed + * + * The following external CPU masks will be updated if necessary: + * - workqueue unbound cpumask + * - housekeeping cpumasks + */ +static void update_isolation_cpumasks(bool isolcpus_updated) { int ret; @@ -1598,7 +1615,10 @@ static void update_unbound_workqueue_cpumask(bool isolcpus_updated) return; ret = workqueue_unbound_exclude_cpumask(isolated_cpus); - WARN_ON_ONCE(ret < 0); + if (WARN_ON_ONCE(ret < 0)) + return; + ret = housekeeping_exlude_isolcpus(isolated_cpus, HOUSEKEEPING_FLAGS); + WARN_ON_ONCE((ret < 0) && (ret != -EOPNOTSUPP)); } /** @@ -1681,7 +1701,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); list_add(&cs->remote_sibling, &remote_children); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); /* * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. @@ -1717,7 +1737,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) cs->prs_err = PERR_INVCPUS; reset_partition_data(cs); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); /* * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. 
@@ -1769,7 +1789,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, if (deleting) isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); /* * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. @@ -2140,7 +2160,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, WARN_ON_ONCE(parent->nr_subparts < 0); } spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); if ((old_prs != new_prs) && (cmd == partcmd_update)) update_partition_exclusive(cs, new_prs); @@ -3193,7 +3213,7 @@ static int update_prstate(struct cpuset *cs, int new_prs) else if (new_xcpus_state) partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(new_xcpus_state); + update_isolation_cpumasks(new_xcpus_state); /* Force update if switching back to member */ update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); -- 2.43.5