[RFC PATCH 2/2] cpuset: Don't change the cpumask if the task changed it.

Upon changing the allowed CPUs of a cgroup, all tasks within this
cgroup get their CPU mask updated to the new mask. Tasks that changed
their own CPU mask get it overwritten without knowing. If a task
restricted itself to a subset of CPUs, there is no reason to change
its mask after a new CPU has been added, or after a CPU which the
task was not using has been removed.

Skip the CPU-mask update if the task's CPU mask differs from the
cgroup's previous CPU mask (i.e. the task changed it) and is a subset
of the requested new CPU mask.
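
For illustration, the new check in update_tasks_cpumask() boils down
to the predicate below. This is a minimal userspace sketch with
cpumasks modelled as plain bitmasks; needs_update() and the bitmask
representation are illustrative only and not part of the patch (the
kernel uses struct cpumask and the cpumask_* helpers). Callers that
pass prev_mask == NULL keep the old always-update behaviour.

/*
 * Minimal userspace model of the skip condition. Cpumasks are plain
 * bitmasks here; all names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Update only if the task never changed its mask (it still equals the
 * cpuset's previous mask) or if it uses CPUs outside the new mask.
 */
static bool needs_update(unsigned long task_mask,
			 unsigned long prev_cs_mask,
			 unsigned long new_cs_mask)
{
	bool unchanged = task_mask == prev_cs_mask;
	bool subset_of_new = (task_mask & ~new_cs_mask) == 0;

	return unchanged || !subset_of_new;
}

int main(void)
{
	/* Cpuset grows 0-3 -> 0-7; task pinned itself to 0-1: skip. */
	printf("%d\n", needs_update(0x03, 0x0f, 0xff));	/* 0 */
	/* Task never changed its mask: keep following the cpuset. */
	printf("%d\n", needs_update(0x0f, 0x0f, 0xff));	/* 1 */
	/* CPU 3 removed while the task was using it: must update. */
	printf("%d\n", needs_update(0x09, 0x0f, 0x07));	/* 1 */
	return 0;
}

With the cpuset growing from CPUs 0-3 to 0-7, a task that pinned
itself to CPUs 0-1 keeps its mask, while a task that never touched
its mask keeps following the cpuset as before.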

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
 kernel/cgroup/cpuset.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8d5126684f9e6..6d0d07148cfaa 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1206,7 +1206,7 @@ void rebuild_sched_domains(void)
  * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
-static void update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, cpumask_var_t prev_mask)
 {
 	struct css_task_iter it;
 	struct task_struct *task;
@@ -1220,7 +1220,13 @@ static void update_tasks_cpumask(struct cpuset *cs)
 		if (top_cs && (task->flags & PF_KTHREAD) &&
 		    kthread_is_per_cpu(task))
 			continue;
-		set_cpus_allowed_ptr(task, cs->effective_cpus);
+		/*
+		 * Update if the task's CPU mask equals the previous cpuset's
+		 * mask or if it has CPUs which are not part of the new cpuset.
+		 */
+		if (!prev_mask || (cpumask_equal(&task->cpus_mask, prev_mask) ||
+				   !cpumask_subset(&task->cpus_mask, cs->effective_cpus)))
+			set_cpus_allowed_ptr(task, cs->effective_cpus);
 	}
 	css_task_iter_end(&it);
 }
@@ -1505,7 +1511,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
 	spin_unlock_irq(&callback_lock);
 
 	if (adding || deleting)
-		update_tasks_cpumask(parent);
+		update_tasks_cpumask(parent, NULL);
 
 	/*
 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@@ -1639,6 +1645,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 			cpumask_clear(cp->subparts_cpus);
 		}
 
+		cpumask_copy(tmp->addmask, cp->effective_cpus);
 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
 		if (cp->nr_subparts_cpus) {
 			/*
@@ -1657,7 +1664,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
-		update_tasks_cpumask(cp);
+		update_tasks_cpumask(cp, tmp->addmask);
 
 		/*
 		 * On legacy hierarchy, if the effective cpumask of any non-
@@ -2305,7 +2312,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 		}
 	}
 
-	update_tasks_cpumask(parent);
+	update_tasks_cpumask(parent, NULL);
 
 	if (parent->child_ecpus_count)
 		update_sibling_cpumasks(parent, cs, &tmpmask);
@@ -3318,7 +3325,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
 	 * as the tasks will be migrated to an ancestor.
 	 */
 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
-		update_tasks_cpumask(cs);
+		update_tasks_cpumask(cs, NULL);
 	if (mems_updated && !nodes_empty(cs->mems_allowed))
 		update_tasks_nodemask(cs);
 
@@ -3355,7 +3362,7 @@ hotplug_update_tasks(struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
-		update_tasks_cpumask(cs);
+		update_tasks_cpumask(cs, NULL);
 	if (mems_updated)
 		update_tasks_nodemask(cs);
 }
-- 
2.37.2



