> @@ -3207,15 +3228,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
>  			sc.may_writepage = 1;
>  
>  		/*
> -		 * Now scan the zone in the dma->highmem direction, stopping
> -		 * at the last zone which needs scanning.
> -		 *
> -		 * We do this because the page allocator works in the opposite
> -		 * direction. This prevents the page allocator from allocating
> -		 * pages behind kswapd's direction of progress, which would
> -		 * cause too much scanning of the lower zones.
> +		 * Continue scanning in the highmem->dma direction stopping at
> +		 * the last zone which needs scanning. This may reclaim lowmem
> +		 * pages that are not necessary for zone balancing but it
> +		 * preserves LRU ordering. It is assumed that the bulk of
> +		 * allocation requests can use arbitrary zones with the
> +		 * possible exception of big highmem:lowmem configurations.
>  		 */
> -		for (i = 0; i <= end_zone; i++) {
> +		for (i = end_zone; i >= end_zone; i--) {

s/i >= end_zone;/i >= 0;/ ?

>  			struct zone *zone = pgdat->node_zones + i;
>  
>  			if (!populated_zone(zone))
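As written, the new loop body executes exactly once: i starts at end_zone and the i >= end_zone test fails as soon as i is decremented. If the intent is to walk every populated zone from highmem down to DMA, as the new comment describes, the hunk would presumably end up looking something like this (just a sketch of the suggested fix, not tested):

	for (i = end_zone; i >= 0; i--) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;
		/* ... rest of the per-zone balancing scan ... */
	}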