From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 91eaed0d61319f58a9f8e43d41a8cbb069b4f73d upstream.

Now that the first group will always be the previous domain of this
@cpu this can be simplified.

In fact, writing the code now removed should've been a big clue I was
doing it wrong :/

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
---
 kernel/sched/topology.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 921dedde2ee1..6b10e0a956c7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -557,7 +557,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
-	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	struct sched_group *first = NULL, *last = NULL, *sg;
 	const struct cpumask *span = sched_domain_span(sd);
 	struct cpumask *covered = sched_domains_tmpmask;
 	struct sd_data *sdd = sd->private;
@@ -587,15 +587,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		init_overlap_sched_group(sd, sg, i);
 
-		/*
-		 * Make sure the first group of this domain contains the
-		 * canonical balance CPU. Otherwise the sched_domain iteration
-		 * breaks. See update_sg_lb_stats().
-		 */
-		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
-		    group_balance_cpu(sg) == cpu)
-			groups = sg;
-
 		if (!first)
 			first = sg;
 		if (last)
@@ -603,7 +594,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		last = sg;
 		last->next = first;
 	}
-	sd->groups = groups;
+	sd->groups = first;
 
 	return 0;
 
--
2.13.1
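
[Editor's note, not part of the patch] For readers outside the kernel tree, here is a minimal standalone C sketch of the list-building pattern the patch relies on: the loop links each new group onto a circular list in visiting order, and per the changelog the first group built is always the one for the previous domain of @cpu, so the domain can simply point at the head of the list instead of hunting for a "canonical balance CPU" group. The struct names and the build_groups()/main() driver below are simplified stand-ins for illustration only, not the kernel's real sched_group/sched_domain code.

#include <stdio.h>
#include <stdlib.h>

struct group {
	int id;
	struct group *next;
};

struct domain {
	struct group *groups;		/* head of the circular group list */
};

static int build_groups(struct domain *d, int ngroups)
{
	struct group *first = NULL, *last = NULL, *g;
	int i;

	for (i = 0; i < ngroups; i++) {
		g = calloc(1, sizeof(*g));
		if (!g)
			return -1;	/* sketch only: leaks earlier groups on failure */
		g->id = i;

		/*
		 * Link in visiting order, mirroring the simplified loop:
		 * no special-casing of a "canonical balance CPU" group.
		 */
		if (!first)
			first = g;
		if (last)
			last->next = g;
		last = g;
		last->next = first;	/* keep the list circular */
	}
	d->groups = first;		/* was: sd->groups = groups; */

	return 0;
}

int main(void)
{
	struct domain d = { 0 };
	struct group *g;

	if (build_groups(&d, 3))
		return 1;

	/* Walk the circular list once, starting at the head. */
	g = d.groups;
	do {
		printf("group %d\n", g->id);
		g = g->next;
	} while (g != d.groups);

	return 0;
}

Running the sketch prints the three groups in build order; the walk starts at d.groups, loosely mirroring how the kernel's group iteration starts at sd->groups.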