On Thu, Dec 20, 2012 at 7:25 AM, Zlatko Calusic <zlatko.calusic@xxxxxxxx> wrote:
>  static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
>                                                         int *classzone_idx)
>  {
> -       int all_zones_ok;
> +       struct zone *unbalanced_zone;

nit: fewer hunks if that flag is not removed (see the sketch at the bottom)

Hillf

>         unsigned long balanced;
>         int i;
>         int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
> @@ -2580,7 +2580,7 @@ loop_again:
>                 unsigned long lru_pages = 0;
>                 int has_under_min_watermark_zone = 0;
>
> -               all_zones_ok = 1;
> +               unbalanced_zone = NULL;
>                 balanced = 0;
>
>                 /*
> @@ -2719,7 +2719,7 @@ loop_again:
>                         }
>
>                         if (!zone_balanced(zone, testorder, 0, end_zone)) {
> -                               all_zones_ok = 0;
> +                               unbalanced_zone = zone;
>                                 /*
>                                  * We are still under min water mark. This
>                                  * means that we have a GFP_ATOMIC allocation
> @@ -2752,7 +2752,7 @@ loop_again:
>                             pfmemalloc_watermark_ok(pgdat))
>                                 wake_up(&pgdat->pfmemalloc_wait);
>
> -               if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
> +               if (!unbalanced_zone || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
>                         break;          /* kswapd: all done */
>                 /*
>                  * OK, kswapd is getting into trouble. Take a nap, then take
> @@ -2762,7 +2762,7 @@ loop_again:
>                 if (has_under_min_watermark_zone)
>                         count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
>                 else
> -                       congestion_wait(BLK_RW_ASYNC, HZ/10);
> +                       wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
>         }
>
>         /*
> @@ -2781,7 +2781,7 @@ out:
>          * high-order: Balanced zones must make up at least 25% of the node
>          * for the node to be balanced
>          */
> -       if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
> +       if (unbalanced_zone && (!order || !pgdat_balanced(pgdat, balanced, *classzone_idx))) {
>                 cond_resched();
>
>                 try_to_freeze();
> --
> 1.7.10.4
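
To illustrate the nit above (my reading: "that flag" is the all_zones_ok mark
removed in the first hunk): if all_zones_ok is kept and unbalanced_zone is only
added next to it, the two pgdat_balanced() tests at the break and at out: need
not be touched, which drops two hunks. Untested sketch only, not a replacement
patch:

        int all_zones_ok;
        struct zone *unbalanced_zone;
        ...
                all_zones_ok = 1;
                unbalanced_zone = NULL;         /* no unbalanced zone seen yet */
        ...
                if (!zone_balanced(zone, testorder, 0, end_zone)) {
                        all_zones_ok = 0;
                        unbalanced_zone = zone; /* remember it for wait_iff_congested() */
                        ...
                }
        ...
                if (has_under_min_watermark_zone)
                        count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
                else
                        wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);

The existing "if (all_zones_ok || ...)" checks then stay exactly as they are.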