shrink_page_list() reclaims pages, but the reclaim count it returns may be
inaccurate because some of the freed pages are CMA pages. If the kernel needs
to reclaim unmovable memory (GFP_KERNEL flag), freed CMA pages cannot satisfy
the request and should not be counted in nr_reclaimed.

v2:
  * Remove #ifdef CONFIG_CMA. Use IS_ENABLED() & is_migrate_cma() instead.

Signed-off-by: Haojian Zhuang <haojian.zhuang@xxxxxxxxx>
---
 mm/vmscan.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cff0d4..414f74f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -720,6 +720,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
+	/* Number of pages freed with MIGRATE_CMA type */
+	unsigned long nr_reclaimed_cma = 0;
+	int mt = 0;

 	cond_resched();

@@ -987,6 +990,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * leave it off the LRU).
 				 */
 				nr_reclaimed++;
+				mt = get_pageblock_migratetype(page);
+				if (is_migrate_cma(mt))
+					nr_reclaimed_cma++;
 				continue;
 			}
 		}
@@ -1005,6 +1011,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		__clear_page_locked(page);
 free_it:
 		nr_reclaimed++;
+		mt = get_pageblock_migratetype(page);
+		if (is_migrate_cma(mt))
+			nr_reclaimed_cma++;

 		/*
 		 * Is there need to periodically free_page_list? It would
@@ -1044,6 +1053,11 @@ keep:
 	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
 	*ret_nr_writeback += nr_writeback;
 	*ret_nr_immediate += nr_immediate;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		mt = allocflags_to_migratetype(sc->gfp_mask);
+		if (mt == MIGRATE_UNMOVABLE)
+			nr_reclaimed -= nr_reclaimed_cma;
+	}
 	return nr_reclaimed;
 }

-- 
1.8.1.2
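For anyone reading along, here is a minimal standalone sketch of the accounting
the patch implements. This is plain userspace C, not kernel code: struct page,
the migratetype values and count_reclaimed() below are simplified stand-ins for
illustration, not the real mm/vmscan.c API. Every freed page bumps
nr_reclaimed, CMA-backed pages are tracked separately, and for an unmovable
request the CMA portion is subtracted from the reported total.

/*
 * Standalone sketch of the CMA-aware reclaim accounting. All names here
 * are simplified stand-ins, not the kernel's own definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA };

struct page {
	enum migratetype mt;
};

static bool is_cma_page(const struct page *page)
{
	return page->mt == MIGRATE_CMA;
}

/*
 * Count how many of the freed pages actually satisfy the request.
 * When the request is for unmovable memory, freed CMA pages do not
 * help, so they are subtracted from the reported total.
 */
static unsigned long count_reclaimed(const struct page *pages, int n,
				     enum migratetype request_mt)
{
	unsigned long nr_reclaimed = 0, nr_reclaimed_cma = 0;
	int i;

	for (i = 0; i < n; i++) {
		nr_reclaimed++;
		if (is_cma_page(&pages[i]))
			nr_reclaimed_cma++;
	}

	if (request_mt == MIGRATE_UNMOVABLE)
		nr_reclaimed -= nr_reclaimed_cma;

	return nr_reclaimed;
}

int main(void)
{
	struct page pages[] = {
		{ MIGRATE_MOVABLE }, { MIGRATE_CMA }, { MIGRATE_CMA },
	};

	/* 3 pages freed, but only 1 can back an unmovable allocation. */
	printf("reported reclaimed: %lu\n",
	       count_reclaimed(pages, 3, MIGRATE_UNMOVABLE));
	return 0;
}

Building and running this prints a reclaimed count of 1 for the three freed
pages, since two of them are CMA-backed and cannot back an unmovable
allocation.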