On Mon 13-05-13 09:12:35, Mel Gorman wrote:
> In the past, kswapd made a decision on whether to compact memory after the
> pgdat was considered balanced. This more or less worked but it is late to
> make such a decision and does not fit well now that kswapd makes a decision
> whether to exit the zone scanning loop depending on reclaim progress.
> 
> This patch will compact a pgdat if at least the requested number of pages
> were reclaimed from unbalanced zones for a given priority. If any zone is
> currently balanced, kswapd will not call compaction as it is expected the
> necessary pages are already available.
> 
> Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
> Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>

Reviewed-by: Michal Hocko <mhocko@xxxxxxx>

> ---
>  mm/vmscan.c | 59 ++++++++++++++++++++++++++++++-----------------------------
>  1 file changed, 30 insertions(+), 29 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 1c10ee5..cd09803 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2661,7 +2661,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
>   */
>  static bool kswapd_shrink_zone(struct zone *zone,
>  			       struct scan_control *sc,
> -			       unsigned long lru_pages)
> +			       unsigned long lru_pages,
> +			       unsigned long *nr_attempted)
>  {
>  	unsigned long nr_slab;
>  	struct reclaim_state *reclaim_state = current->reclaim_state;
> @@ -2677,6 +2678,9 @@ static bool kswapd_shrink_zone(struct zone *zone,
>  	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
>  	sc->nr_reclaimed += reclaim_state->reclaimed_slab;
> 
> +	/* Account for the number of pages attempted to reclaim */
> +	*nr_attempted += sc->nr_to_reclaim;
> +
>  	if (nr_slab == 0 && !zone_reclaimable(zone))
>  		zone->all_unreclaimable = 1;
> 
> @@ -2724,7 +2728,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
> 
>  	do {
>  		unsigned long lru_pages = 0;
> +		unsigned long nr_attempted = 0;
>  		bool raise_priority = true;
> +		bool pgdat_needs_compaction = (order > 0);
> 
>  		sc.nr_reclaimed = 0;
> 
> @@ -2774,7 +2780,21 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
>  		for (i = 0; i <= end_zone; i++) {
>  			struct zone *zone = pgdat->node_zones + i;
> 
> +			if (!populated_zone(zone))
> +				continue;
> +
>  			lru_pages += zone_reclaimable_pages(zone);
> +
> +			/*
> +			 * If any zone is currently balanced then kswapd will
> +			 * not call compaction as it is expected that the
> +			 * necessary pages are already available.
> +			 */
> +			if (pgdat_needs_compaction &&
> +			    zone_watermark_ok(zone, order,
> +					      low_wmark_pages(zone),
> +					      *classzone_idx, 0))
> +				pgdat_needs_compaction = false;
>  		}
> 
>  		/*
> @@ -2843,7 +2863,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
>  			 * already being scanned that high
>  			 * watermark would be met at 100% efficiency.
>  			 */
> -			if (kswapd_shrink_zone(zone, &sc, lru_pages))
> +			if (kswapd_shrink_zone(zone, &sc, lru_pages,
> +					       &nr_attempted))
>  				raise_priority = false;
>  		}
> 
> @@ -2896,6 +2917,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
>  			break;
> 
>  		/*
> +		 * Compact if necessary and kswapd is reclaiming at least the
> +		 * high watermark number of pages as requested
> +		 */
> +		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
> +			compact_pgdat(pgdat, order);
> +
> +		/*
>  		 * Raise priority if scanning rate is too low or there was no
>  		 * progress in reclaiming pages
>  		 */
> @@ -2904,33 +2932,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
>  	} while (sc.priority >= 0 &&
>  		 !pgdat_balanced(pgdat, order, *classzone_idx));
> 
> -	/*
> -	 * If kswapd was reclaiming at a higher order, it has the option of
> -	 * sleeping without all zones being balanced. Before it does, it must
> -	 * ensure that the watermarks for order-0 on *all* zones are met and
> -	 * that the congestion flags are cleared. The congestion flag must
> -	 * be cleared as kswapd is the only mechanism that clears the flag
> -	 * and it is potentially going to sleep here.
> -	 */
> -	if (order) {
> -		int zones_need_compaction = 1;
> -
> -		for (i = 0; i <= end_zone; i++) {
> -			struct zone *zone = pgdat->node_zones + i;
> -
> -			if (!populated_zone(zone))
> -				continue;
> -
> -			/* Check if the memory needs to be defragmented. */
> -			if (zone_watermark_ok(zone, order,
> -					      low_wmark_pages(zone), *classzone_idx, 0))
> -				zones_need_compaction = 0;
> -		}
> -
> -		if (zones_need_compaction)
> -			compact_pgdat(pgdat, order);
> -	}
> -
>  out:
>  	/*
>  	 * Return the order we were reclaiming at so prepare_kswapd_sleep()
> -- 
> 1.8.1.4
> 

-- 
Michal Hocko
SUSE Labs
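
The decision the patch moves into balance_pgdat()'s priority loop boils down to
two checks: skip compaction if any populated zone already meets its watermark at
the requested order, otherwise compact once reclaim has covered the attempted
total. Below is a minimal stand-alone C sketch of that flow; struct toy_zone,
the fixed 128-page reclaim target and the sample reclaim total are illustrative
stand-ins, not values or types from the patch.

/*
 * Toy model of the per-priority compaction decision described above.
 * Not kernel code: types, helpers and numbers are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_zone {
	bool populated;
	bool watermark_ok;	/* stand-in for zone_watermark_ok() at 'order' */
};

int main(void)
{
	struct toy_zone zones[] = {
		{ .populated = true,  .watermark_ok = false },
		{ .populated = false, .watermark_ok = false },
		{ .populated = true,  .watermark_ok = false },
	};
	int order = 3;				/* a high-order request */
	unsigned long nr_attempted = 0;		/* sum of per-zone reclaim targets */
	unsigned long nr_reclaimed = 600;	/* pretend result of this priority */
	bool pgdat_needs_compaction = (order > 0);
	unsigned long i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		if (!zones[i].populated)
			continue;

		/* Each shrunk zone adds its reclaim target to the attempted total */
		nr_attempted += 128;

		/* Any already-balanced zone means compaction is skipped entirely */
		if (pgdat_needs_compaction && zones[i].watermark_ok)
			pgdat_needs_compaction = false;
	}

	/* Compact only if no zone was balanced and reclaim covered the attempted total */
	if (pgdat_needs_compaction && nr_reclaimed > nr_attempted)
		printf("would call compact_pgdat() at order %d\n", order);
	else
		printf("skip compaction at this priority\n");

	return 0;
}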