On Sat, Jul 06, 2013 at 12:09:01AM +0100, Mel Gorman wrote:
> +/* Returns true if the given node is compute overloaded */
> +static bool sched_numa_overloaded(int nid)
> +{
> +	int nr_cpus = 0;
> +	int nr_preferred = 0;
> +	int i;
> +
> +	for_each_cpu(i, cpumask_of_node(nid)) {
> +		nr_cpus++;
> +		nr_preferred += cpu_rq(i)->nr_preferred_running;
> +	}
> +
> +	return nr_preferred >= nr_cpus << 1;
> +}
> +
>  static void task_numa_placement(struct task_struct *p)
>  {
>  	int seq, nid, max_nid = 0;
> @@ -908,7 +935,7 @@ static void task_numa_placement(struct task_struct *p)
>
>  		/* Find maximum private faults */
>  		faults = p->numa_faults[task_faults_idx(nid, 1)];
> -		if (faults > max_faults) {
> +		if (faults > max_faults && !sched_numa_overloaded(nid)) {
>  			max_faults = faults;
>  			max_nid = nid;
>  		}

This again very explicitly breaks for overloaded scenarios.
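
To make the concern concrete, here is a minimal userspace sketch of the selection loop as quoted above; the fault counts are made up and node_overloaded[] just stands in for sched_numa_overloaded(). When the nodes holding the bulk of the task's faults are the overloaded ones, max_nid lands on a node with almost none of its memory, and if every faulted node is overloaded it simply stays at its initial value of 0.

/*
 * Standalone illustration only, not kernel code. Numbers are invented;
 * node_overloaded[] plays the role of sched_numa_overloaded(nid).
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 4

int main(void)
{
	/* Hypothetical private fault counts per node for one task. */
	unsigned long faults[NR_NODES] = { 10, 500, 450, 20 };
	/* Suppose the nodes the task actually uses are "overloaded". */
	bool node_overloaded[NR_NODES] = { false, true, true, false };

	int nid, max_nid = 0;
	unsigned long max_faults = 0;

	for (nid = 0; nid < NR_NODES; nid++) {
		/* Same shape as the patched condition in task_numa_placement() */
		if (faults[nid] > max_faults && !node_overloaded[nid]) {
			max_faults = faults[nid];
			max_nid = nid;
		}
	}

	/* Prints max_nid = 3: the node with 20 faults beats the ones with 500/450. */
	printf("max_nid = %d (max_faults = %lu)\n", max_nid, max_faults);
	return 0;
}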