The patch titled
     mm: vmscan: convert lumpy_mode into a bitmask
has been added to the -mm tree.  Its filename is
     mm-vmscan-convert-lumpy_mode-into-a-bitmask.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

See http://userweb.kernel.org/~akpm/stuff/added-to-mm.txt to find
out what to do about this

The current -mm tree may be found at http://userweb.kernel.org/~akpm/mmotm/

------------------------------------------------------
Subject: mm: vmscan: convert lumpy_mode into a bitmask
From: Mel Gorman <mel@xxxxxxxxx>

Currently, lumpy_mode is an enum that determines whether lumpy reclaim is
off, synchronous or asynchronous.  In preparation for using compaction
instead of lumpy reclaim, this patch converts the mode into a bitmask of
flags.

Signed-off-by: Mel Gorman <mel@xxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Andy Whitcroft <apw@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/trace/events/vmscan.h |    6 ++--
 mm/vmscan.c                   |   46 +++++++++++++++++++-------------
 2 files changed, 31 insertions(+), 21 deletions(-)
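An aside on the central change (this note and the sketch below are
editorial, not part of the patch): once the modes are combinable bits, a
value such as LUMPY_MODE_CONTIGRECLAIM | LUMPY_MODE_SYNC no longer
compares equal to any single flag, which is why every "== LUMPY_MODE_*"
test in the diff becomes an "& LUMPY_MODE_*" mask test.  A minimal
standalone sketch, with plain unsigned constants standing in for the
__bitwise__/__force annotations (which compile away outside sparse
anyway):

	#include <stdio.h>

	typedef unsigned int lumpy_mode;

	#define LUMPY_MODE_SINGLE		0x01u
	#define LUMPY_MODE_ASYNC		0x02u
	#define LUMPY_MODE_SYNC			0x04u
	#define LUMPY_MODE_CONTIGRECLAIM	0x08u

	int main(void)
	{
		/* After the patch, more than one bit may be set at once. */
		lumpy_mode mode = LUMPY_MODE_CONTIGRECLAIM | LUMPY_MODE_SYNC;

		/* The old equality test fails on a combined value... */
		printf("== test: %d\n", mode == LUMPY_MODE_SYNC);	/* 0 */

		/* ...so the patch switches every test to a mask test. */
		printf("&  test: %d\n", (mode & LUMPY_MODE_SYNC) != 0);	/* 1 */

		return 0;
	}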
diff -puN include/trace/events/vmscan.h~mm-vmscan-convert-lumpy_mode-into-a-bitmask include/trace/events/vmscan.h
--- a/include/trace/events/vmscan.h~mm-vmscan-convert-lumpy_mode-into-a-bitmask
+++ a/include/trace/events/vmscan.h
@@ -25,13 +25,13 @@
 
 #define trace_reclaim_flags(page, sync) ( \
 	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
 	)
 
 #define trace_shrink_flags(file, sync) ( \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+	(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
 			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	(sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
 	)
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
diff -puN mm/vmscan.c~mm-vmscan-convert-lumpy_mode-into-a-bitmask mm/vmscan.c
--- a/mm/vmscan.c~mm-vmscan-convert-lumpy_mode-into-a-bitmask
+++ a/mm/vmscan.c
@@ -51,11 +51,20 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-enum lumpy_mode {
-	LUMPY_MODE_NONE,
-	LUMPY_MODE_ASYNC,
-	LUMPY_MODE_SYNC,
-};
+/*
+ * lumpy_mode determines how the inactive list is shrunk
+ * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
+ * LUMPY_MODE_ASYNC:  Do not block
+ * LUMPY_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
+ * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
+ *			page from the LRU and reclaim all pages within a
+ *			naturally aligned range
+ */
+typedef unsigned __bitwise__ lumpy_mode;
+#define LUMPY_MODE_SINGLE		((__force lumpy_mode)0x01u)
+#define LUMPY_MODE_ASYNC		((__force lumpy_mode)0x02u)
+#define LUMPY_MODE_SYNC			((__force lumpy_mode)0x04u)
+#define LUMPY_MODE_CONTIGRECLAIM	((__force lumpy_mode)0x08u)
 
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
@@ -88,7 +97,7 @@ struct scan_control {
 	 * Intend to reclaim enough continuous memory rather than reclaim
 	 * enough amount of memory. i.e, mode for high order allocation.
 	 */
-	enum lumpy_mode lumpy_reclaim_mode;
+	lumpy_mode lumpy_reclaim_mode;
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long
 static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
 				   bool sync)
 {
-	enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+	lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
 
 	/*
 	 * Some reclaim have alredy been failed. No worth to try synchronous
 	 * lumpy reclaim.
 	 */
-	if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sync && sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
 		return;
 
 	/*
@@ -288,17 +297,18 @@ static void set_lumpy_reclaim_mode(int p
 	 * trouble getting a small set of contiguous pages, we
 	 * will reclaim both active and inactive pages.
 	 */
+	sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
 	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim_mode = mode;
+		sc->lumpy_reclaim_mode |= syncmode;
 	else
-		sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+		sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static void disable_lumpy_reclaim_mode(struct scan_control *sc)
 {
-	sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+	sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
 }
 
 static inline int is_page_cache_freeable(struct page *page)
@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *pa
 	 * first attempt to free a range of pages fails.
 	 */
 	if (PageWriteback(page) &&
-	    sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+	    (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
 		wait_on_page_writeback(page);
 
 	if (!PageWriteback(page)) {
@@ -615,7 +625,7 @@ static enum page_references page_check_r
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -732,7 +742,7 @@ static unsigned long shrink_page_list(st
 			 * for any page for which writeback has already
 			 * started.
 			 */
-			if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+			if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
 			    may_enter_fs)
 				wait_on_page_writeback(page);
 			else {
@@ -1317,7 +1327,7 @@ static inline bool should_reclaim_stall(
 		return false;
 
 	/* Only stall on lumpy reclaim */
-	if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
 		return false;
 
 	/* If we have relaimed everything on the isolated list, no stall */
@@ -1368,7 +1378,7 @@ shrink_inactive_list(unsigned long nr_to
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
 					ISOLATE_INACTIVE : ISOLATE_BOTH,
 			zone, 0, file);
 		zone->pages_scanned += nr_scanned;
@@ -1381,7 +1391,7 @@ shrink_inactive_list(unsigned long nr_to
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+			sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
 					ISOLATE_INACTIVE : ISOLATE_BOTH,
 			zone, sc->mem_cgroup,
 			0, file);
_
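A second editorial aside, not part of the message: the point of the
__bitwise__ typedef, rather than a plain unsigned, is that sparse
("make C=1") treats such a type as opaque, so values may only cross
into or out of it through an explicit __force cast like the ones in
the #defines above.  A reduced sketch of the mechanism; the fallback
#defines mirror what the kernel's compiler.h provides when sparse is
not running, and the two function names are invented for illustration:

	#ifdef __CHECKER__			/* defined by sparse */
	#define __bitwise__	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise__
	#define __force
	#endif

	typedef unsigned __bitwise__ lumpy_mode;

	#define LUMPY_MODE_SYNC	((__force lumpy_mode)0x04u)

	unsigned leak_mode(lumpy_mode mode)
	{
		return mode;	/* sparse warns: restricted type leaks out */
	}

	lumpy_mode make_mode(unsigned raw)
	{
		return (__force lumpy_mode)raw;	/* explicit, no warning */
	}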
Patches currently in -mm which might be from mel@xxxxxxxxx are

linux-next.patch
mm-remove-call-to-find_vma-in-pagewalk-for-non-hugetlbfs.patch
mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds.patch
mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds-fix.patch
mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds-update.patch
mm-vmstat-use-a-single-setter-function-and-callback-for-adjusting-percpu-thresholds-fix-set_pgdat_percpu_threshold-dont-use-for_each_online_cpu.patch
writeback-io-less-balance_dirty_pages.patch
writeback-consolidate-variable-names-in-balance_dirty_pages.patch
writeback-per-task-rate-limit-on-balance_dirty_pages.patch
writeback-per-task-rate-limit-on-balance_dirty_pages-fix.patch
writeback-prevent-duplicate-balance_dirty_pages_ratelimited-calls.patch
writeback-account-per-bdi-accumulated-written-pages.patch
writeback-bdi-write-bandwidth-estimation.patch
writeback-bdi-write-bandwidth-estimation-fix.patch
writeback-show-bdi-write-bandwidth-in-debugfs.patch
writeback-quit-throttling-when-bdi-dirty-pages-dropped-low.patch
writeback-reduce-per-bdi-dirty-threshold-ramp-up-time.patch
writeback-make-reasonable-gap-between-the-dirty-background-thresholds.patch
writeback-scale-down-max-throttle-bandwidth-on-concurrent-dirtiers.patch
writeback-add-trace-event-for-balance_dirty_pages.patch
writeback-make-nr_to_write-a-per-file-limit.patch
writeback-make-nr_to_write-a-per-file-limit-fix.patch
vmscan-factor-out-kswapd-sleeping-logic-from-kswapd.patch
mm-compaction-add-trace-events-for-memory-compaction-activity.patch
mm-vmscan-convert-lumpy_mode-into-a-bitmask.patch
mm-vmscan-reclaim-order-0-and-use-compaction-instead-of-lumpy-reclaim.patch
mm-migration-allow-migration-to-operate-asynchronously-and-avoid-synchronous-compaction-in-the-faster-path.patch
mm-migration-cleanup-migrate_pages-api-by-matching-types-for-offlining-and-sync.patch
mm-compaction-perform-a-faster-migration-scan-when-migrating-asynchronously.patch
mm-vmscan-rename-lumpy_mode-to-reclaim_mode.patch
add-debugging-aid-for-memory-initialisation-problems.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html