> > @@ -77,6 +77,8 @@ struct scan_control {
> > 
> >  	int order;
> > 
> > +	int lumpy_reclaim;
> > +
> 
> Needs a comment explaining its role, please.  Something like "direct
> this reclaim run to perform lumpy reclaim"?
> 
> A clearer name might be "lumpy_relcaim_mode"?
> 
> Making it a `bool' would clarify things too.

Sorry, I missed this review comment. How about this?

---
 mm/vmscan.c |   39 ++++++++++++++++++++++++---------------
 1 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 13d9546..c3bcdd4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -77,7 +77,11 @@ struct scan_control {
 
 	int order;
 
-	int lumpy_reclaim;
+	/*
+	 * Intend to reclaim enough contiguous memory rather than reclaim
+	 * enough amount of memory. I.e., it's the mode for high order allocation.
+	 */
+	bool lumpy_reclaim_mode;
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -577,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -1153,7 +1157,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = sc->lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
@@ -1206,7 +1210,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    sc->lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 			/*
@@ -1609,6 +1613,21 @@ static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
 	return nr;
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
+	else
+		sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1645,17 +1664,7 @@ static void shrink_zone(int priority, struct zone *zone,
 					  &reclaim_stat->nr_saved_scan[l]);
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim = 1;
-	else
-		sc->lumpy_reclaim = 0;
+	set_lumpy_reclaim_mode(priority, sc);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
-- 
1.6.5.2
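
For reference, below is a small userspace sketch (not part of the patch) that mirrors the decision logic of set_lumpy_reclaim_mode() above. PAGE_ALLOC_COSTLY_ORDER (3) and DEF_PRIORITY (12) are hard-coded with their usual kernel values, and the helper name is reused here only for illustration: it shows that order-0 reclaim never uses lumpy mode, low orders switch to it only after reclaim priority has dropped below DEF_PRIORITY - 2, and costly orders always use it.

/* Standalone illustration only -- not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3	/* usual kernel value */
#define DEF_PRIORITY		12	/* usual kernel value */

/* Same decision logic as set_lumpy_reclaim_mode() in the patch. */
static bool lumpy_reclaim_mode(int order, int priority)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return true;
	if (order && priority < DEF_PRIORITY - 2)
		return true;
	return false;
}

int main(void)
{
	int order, priority;

	/* Print whether lumpy reclaim would be used for a few cases. */
	for (order = 0; order <= PAGE_ALLOC_COSTLY_ORDER + 1; order++)
		for (priority = DEF_PRIORITY; priority >= 0; priority -= 3)
			printf("order=%d priority=%2d -> lumpy=%d\n",
			       order, priority,
			       lumpy_reclaim_mode(order, priority));
	return 0;
}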