3.16.49-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Lauro Ramos Venancio <lvenanci@xxxxxxxxxx>

commit 8c0334697dc37eb3d6d7632304d3a3662248daac upstream.

Create functions build_group_from_child_sched_domain() and
init_overlap_sched_group(). No functional change.

Signed-off-by: Lauro Ramos Venancio <lvenanci@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Rik van Riel <riel@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/1492091769-19879-2-git-send-email-lvenanci@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
[bwh: Backported to 3.16:
 - Code being moved is slightly different
 - Adjust filename]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
---
 kernel/sched/core.c | 62 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 43 insertions(+), 19 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5801,6 +5801,47 @@ int group_balance_cpu(struct sched_group
 	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
 }
 
+static struct sched_group *
+build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *sg;
+	struct cpumask *sg_span;
+
+	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			GFP_KERNEL, cpu_to_node(cpu));
+
+	if (!sg)
+		return NULL;
+
+	sg_span = sched_group_cpus(sg);
+	if (sd->child)
+		cpumask_copy(sg_span, sched_domain_span(sd->child));
+	else
+		cpumask_copy(sg_span, sched_domain_span(sd));
+
+	return sg;
+}
+
+static void init_overlap_sched_group(struct sched_domain *sd,
+				     struct sched_group *sg, int cpu)
+{
+	struct sd_data *sdd = sd->private;
+	struct cpumask *sg_span;
+
+	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+	if (atomic_inc_return(&sg->sgc->ref) == 1)
+		build_group_mask(sd, sg);
+
+	/*
+	 * Initialize sgc->capacity such that even if we mess up the
+	 * domains and no possible iteration will get us here, we won't
+	 * die on a /0 trap.
+	 */
+	sg_span = sched_group_cpus(sg);
+	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+	sg->sgc->capacity_orig = sg->sgc->capacity;
+}
+
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
@@ -5825,31 +5866,14 @@ build_overlap_sched_groups(struct sched_
 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
 			continue;
 
-		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				GFP_KERNEL, cpu_to_node(cpu));
-
+		sg = build_group_from_child_sched_domain(sibling, cpu);
 		if (!sg)
 			goto fail;
 
 		sg_span = sched_group_cpus(sg);
-		if (sibling->child)
-			cpumask_copy(sg_span, sched_domain_span(sibling->child));
-		else
-			cpumask_set_cpu(i, sg_span);
-
 		cpumask_or(covered, covered, sg_span);
 
-		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
-		if (atomic_inc_return(&sg->sgc->ref) == 1)
-			build_group_mask(sd, sg);
-
-		/*
-		 * Initialize sgc->capacity such that even if we mess up the
-		 * domains and no possible iteration will get us here, we won't
-		 * die on a /0 trap.
-		 */
-		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-		sg->sgc->capacity_orig = sg->sgc->capacity;
+		init_overlap_sched_group(sd, sg, i);
 
 		/*
 		 * Make sure the first group of this domain contains the
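
For reviewers who prefer to see the end state rather than the interleaved
hunks, the loop body of build_overlap_sched_groups() after this patch
reduces to roughly the following. This is a sketch assembled by hand from
the hunks above, not a copy of the resulting 3.16 source; the loop setup
and the rest of the loop body are elided with "...":

	for_each_cpu(i, span) {
		...
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		/* kzalloc_node() + group span copy now live in the helper */
		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		cpumask_or(covered, covered, sg_span);

		/* sgc reference, group mask and capacity init moved here */
		init_overlap_sched_group(sd, sg, i);
		...
	}

As in the code being moved, the allocation helper is passed the balance
CPU ("cpu") for node-local allocation, while init_overlap_sched_group()
is passed "i" so the per-CPU sgc lookup still uses the sibling CPU.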