tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   303392fd5c160822bf778270b28ec5ea50cab2b4
commit: b4e0b68fbd9d1fd7e31cbe8adca3ad6cf556e2ee mm: memcontrol: use obj_cgroup APIs to charge kmem pages
date:   9 weeks ago
config: powerpc64-randconfig-r031-20210704 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project cb5de7c813f976dd458bd2a7f40702ba648bf650)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install powerpc64 cross compiling tool for clang build
        # apt-get install binutils-powerpc64-linux-gnu
        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b4e0b68fbd9d1fd7e31cbe8adca3ad6cf556e2ee
        git remote add linus https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout b4e0b68fbd9d1fd7e31cbe8adca3ad6cf556e2ee
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=powerpc64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

   In file included from mm/compaction.c:11:
   In file included from include/linux/cpu.h:17:
   In file included from include/linux/node.h:18:
   In file included from include/linux/device.h:15:
   In file included from include/linux/dev_printk.h:16:
   In file included from include/linux/ratelimit.h:6:
   In file included from include/linux/sched.h:12:
   In file included from arch/powerpc/include/asm/current.h:13:
   In file included from arch/powerpc/include/asm/paca.h:17:
   In file included from arch/powerpc/include/asm/lppaca.h:46:
   In file included from arch/powerpc/include/asm/mmu.h:147:
   In file included from include/linux/bug.h:5:
   In file included from arch/powerpc/include/asm/bug.h:109:
   In file included from include/asm-generic/bug.h:20:
   In file included from include/linux/kernel.h:11:
   In file included from include/linux/bitops.h:32:
   In file included from arch/powerpc/include/asm/bitops.h:62:
   arch/powerpc/include/asm/barrier.h:49:9: warning: '__lwsync' macro redefined [-Wmacro-redefined]
   #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
           ^
   <built-in>:309:9: note: previous definition is here
   #define __lwsync __builtin_ppc_lwsync
           ^
>> mm/compaction.c:799:1: warning: stack frame size (2080) exceeds limit (2048) in function 'isolate_migratepages_block' [-Wframe-larger-than]
   isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
   ^
   2 warnings generated.


vim +/isolate_migratepages_block +799 mm/compaction.c

748446bb6b5a93 Mel Gorman 2010-05-24 779
2fe86e00040761 Michal Nazarewicz 2012-01-30 780 /**
edc2ca61249679 Vlastimil Babka 2014-10-09 781  * isolate_migratepages_block() - isolate all migrate-able pages within
edc2ca61249679 Vlastimil Babka 2014-10-09 782  * a single pageblock
2fe86e00040761 Michal Nazarewicz 2012-01-30 783  * @cc: Compaction control structure.
edc2ca61249679 Vlastimil Babka 2014-10-09 784  * @low_pfn: The first PFN to isolate
edc2ca61249679 Vlastimil Babka 2014-10-09 785  * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
edc2ca61249679 Vlastimil Babka 2014-10-09 786  * @isolate_mode: Isolation mode to be used.
2fe86e00040761 Michal Nazarewicz 2012-01-30 787  *
2fe86e00040761 Michal Nazarewicz 2012-01-30 788  * Isolate all pages that can be migrated from the range specified by
edc2ca61249679 Vlastimil Babka 2014-10-09 789  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
edc2ca61249679 Vlastimil Babka 2014-10-09 790  * Returns zero if there is a fatal signal pending, otherwise PFN of the
edc2ca61249679 Vlastimil Babka 2014-10-09 791  * first page that was not scanned (which may be both less, equal to or more
edc2ca61249679 Vlastimil Babka 2014-10-09 792  * than end_pfn).
2fe86e00040761 Michal Nazarewicz 2012-01-30 793  *
edc2ca61249679 Vlastimil Babka 2014-10-09 794  * The pages are isolated on cc->migratepages list (not required to be empty),
edc2ca61249679 Vlastimil Babka 2014-10-09 795  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
edc2ca61249679 Vlastimil Babka 2014-10-09 796  * is neither read nor updated.
748446bb6b5a93 Mel Gorman 2010-05-24 797  */
edc2ca61249679 Vlastimil Babka 2014-10-09 798 static unsigned long
edc2ca61249679 Vlastimil Babka 2014-10-09 @799 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
edc2ca61249679 Vlastimil Babka 2014-10-09 800 		unsigned long end_pfn, isolate_mode_t isolate_mode)
748446bb6b5a93 Mel Gorman 2010-05-24 801 {
5f438eee8f2e97 Andrey Ryabinin 2019-03-05 802 	pg_data_t *pgdat = cc->zone->zone_pgdat;
b7aba6984dc048 Mel Gorman 2011-01-13 803 	unsigned long nr_scanned = 0, nr_isolated = 0;
fa9add641b1b1c Hugh Dickins 2012-05-29 804 	struct lruvec *lruvec;
b8b2d825323633 Xiubo Li 2014-10-09 805 	unsigned long flags = 0;
6168d0da2b479c Alex Shi 2020-12-15 806 	struct lruvec *locked = NULL;
bb13ffeb9f6bfe Mel Gorman 2012-10-08 807 	struct page *page = NULL, *valid_page = NULL;
e34d85f0e3c60f Joonsoo Kim 2015-02-11 808 	unsigned long start_pfn = low_pfn;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 809 	bool skip_on_failure = false;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 810 	unsigned long next_skip_pfn = 0;
e380bebe477154 Mel Gorman 2019-03-05 811 	bool skip_updated = false;
748446bb6b5a93 Mel Gorman 2010-05-24 812
748446bb6b5a93 Mel Gorman 2010-05-24 813 	/*
748446bb6b5a93 Mel Gorman 2010-05-24 814 	 * Ensure that there are not too many pages isolated from the LRU
748446bb6b5a93 Mel Gorman 2010-05-24 815 	 * list by either parallel reclaimers or compaction. If there are,
748446bb6b5a93 Mel Gorman 2010-05-24 816 	 * delay for some time until fewer pages are isolated
748446bb6b5a93 Mel Gorman 2010-05-24 817 	 */
5f438eee8f2e97 Andrey Ryabinin 2019-03-05 818 	while (unlikely(too_many_isolated(pgdat))) {
d20bdd571ee5c9 Zi Yan 2020-11-13 819 		/* stop isolation if there are still pages not migrated */
d20bdd571ee5c9 Zi Yan 2020-11-13 820 		if (cc->nr_migratepages)
d20bdd571ee5c9 Zi Yan 2020-11-13 821 			return 0;
d20bdd571ee5c9 Zi Yan 2020-11-13 822
f9e35b3b41f47c Mel Gorman 2011-06-15 823 		/* async migration should just abort */
e0b9daeb453e60 David Rientjes 2014-06-04 824 		if (cc->mode == MIGRATE_ASYNC)
2fe86e00040761 Michal Nazarewicz 2012-01-30 825 			return 0;
f9e35b3b41f47c Mel Gorman 2011-06-15 826
748446bb6b5a93 Mel Gorman 2010-05-24 827 		congestion_wait(BLK_RW_ASYNC, HZ/10);
748446bb6b5a93 Mel Gorman 2010-05-24 828
748446bb6b5a93 Mel Gorman 2010-05-24 829 		if (fatal_signal_pending(current))
2fe86e00040761 Michal Nazarewicz 2012-01-30 830 			return 0;
748446bb6b5a93 Mel Gorman 2010-05-24 831 	}
748446bb6b5a93 Mel Gorman 2010-05-24 832
cf66f0700c8f1d Mel Gorman 2019-03-05 833 	cond_resched();
aeef4b83806f49 David Rientjes 2014-06-04 834
fdd048e12c9a46 Vlastimil Babka 2016-05-19 835 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
fdd048e12c9a46 Vlastimil Babka 2016-05-19 836 		skip_on_failure = true;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 837 		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
fdd048e12c9a46 Vlastimil Babka 2016-05-19 838 	}
fdd048e12c9a46 Vlastimil Babka 2016-05-19 839
748446bb6b5a93 Mel Gorman 2010-05-24 840 	/* Time to isolate some pages for migration */
748446bb6b5a93 Mel Gorman 2010-05-24 841 	for (; low_pfn < end_pfn; low_pfn++) {
29c0dde830f8c0 Vlastimil Babka 2015-09-08 842
fdd048e12c9a46 Vlastimil Babka 2016-05-19 843 		if (skip_on_failure && low_pfn >= next_skip_pfn) {
fdd048e12c9a46 Vlastimil Babka 2016-05-19 844 			/*
fdd048e12c9a46 Vlastimil Babka 2016-05-19 845 			 * We have isolated all migration candidates in the
fdd048e12c9a46 Vlastimil Babka 2016-05-19 846 			 * previous order-aligned block, and did not skip it due
fdd048e12c9a46 Vlastimil Babka 2016-05-19 847 			 * to failure. We should migrate the pages now and
fdd048e12c9a46 Vlastimil Babka 2016-05-19 848 			 * hopefully succeed compaction.
fdd048e12c9a46 Vlastimil Babka 2016-05-19 849 			 */
fdd048e12c9a46 Vlastimil Babka 2016-05-19 850 			if (nr_isolated)
fdd048e12c9a46 Vlastimil Babka 2016-05-19 851 				break;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 852
fdd048e12c9a46 Vlastimil Babka 2016-05-19 853 			/*
fdd048e12c9a46 Vlastimil Babka 2016-05-19 854 			 * We failed to isolate in the previous order-aligned
fdd048e12c9a46 Vlastimil Babka 2016-05-19 855 			 * block. Set the new boundary to the end of the
fdd048e12c9a46 Vlastimil Babka 2016-05-19 856 			 * current block. Note we can't simply increase
fdd048e12c9a46 Vlastimil Babka 2016-05-19 857 			 * next_skip_pfn by 1 << order, as low_pfn might have
fdd048e12c9a46 Vlastimil Babka 2016-05-19 858 			 * been incremented by a higher number due to skipping
fdd048e12c9a46 Vlastimil Babka 2016-05-19 859 			 * a compound or a high-order buddy page in the
fdd048e12c9a46 Vlastimil Babka 2016-05-19 860 			 * previous loop iteration.
fdd048e12c9a46 Vlastimil Babka 2016-05-19 861 			 */
fdd048e12c9a46 Vlastimil Babka 2016-05-19 862 			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
fdd048e12c9a46 Vlastimil Babka 2016-05-19 863 		}
fdd048e12c9a46 Vlastimil Babka 2016-05-19 864
8b44d2791f9125 Vlastimil Babka 2014-10-09 865 		/*
8b44d2791f9125 Vlastimil Babka 2014-10-09 866 		 * Periodically drop the lock (if held) regardless of its
670105a25608af Mel Gorman 2019-08-02 867 		 * contention, to give chance to IRQs. Abort completely if
670105a25608af Mel Gorman 2019-08-02 868 		 * a fatal signal is pending.
8b44d2791f9125 Vlastimil Babka 2014-10-09 869 		 */
6168d0da2b479c Alex Shi 2020-12-15 870 		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
6168d0da2b479c Alex Shi 2020-12-15 871 			if (locked) {
6168d0da2b479c Alex Shi 2020-12-15 872 				unlock_page_lruvec_irqrestore(locked, flags);
6168d0da2b479c Alex Shi 2020-12-15 873 				locked = NULL;
6168d0da2b479c Alex Shi 2020-12-15 874 			}
6168d0da2b479c Alex Shi 2020-12-15 875
6168d0da2b479c Alex Shi 2020-12-15 876 			if (fatal_signal_pending(current)) {
6168d0da2b479c Alex Shi 2020-12-15 877 				cc->contended = true;
6168d0da2b479c Alex Shi 2020-12-15 878
670105a25608af Mel Gorman 2019-08-02 879 				low_pfn = 0;
670105a25608af Mel Gorman 2019-08-02 880 				goto fatal_pending;
670105a25608af Mel Gorman 2019-08-02 881 			}
b2eef8c0d09101 Andrea Arcangeli 2011-03-22 882
6168d0da2b479c Alex Shi 2020-12-15 883 			cond_resched();
6168d0da2b479c Alex Shi 2020-12-15 884 		}
6168d0da2b479c Alex Shi 2020-12-15 885
748446bb6b5a93 Mel Gorman 2010-05-24 886 		if (!pfn_valid_within(low_pfn))
fdd048e12c9a46 Vlastimil Babka 2016-05-19 887 			goto isolate_fail;
b7aba6984dc048 Mel Gorman 2011-01-13 888 		nr_scanned++;
748446bb6b5a93 Mel Gorman 2010-05-24 889
748446bb6b5a93 Mel Gorman 2010-05-24 890 		page = pfn_to_page(low_pfn);
dc9086004b3d5d Mel Gorman 2012-02-08 891
e380bebe477154 Mel Gorman 2019-03-05 892 		/*
e380bebe477154 Mel Gorman 2019-03-05 893 		 * Check if the pageblock has already been marked skipped.
e380bebe477154 Mel Gorman 2019-03-05 894 		 * Only the aligned PFN is checked as the caller isolates
e380bebe477154 Mel Gorman 2019-03-05 895 		 * COMPACT_CLUSTER_MAX at a time so the second call must
e380bebe477154 Mel Gorman 2019-03-05 896 		 * not falsely conclude that the block should be skipped.
e380bebe477154 Mel Gorman 2019-03-05 897 		 */
e380bebe477154 Mel Gorman 2019-03-05 898 		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
e380bebe477154 Mel Gorman 2019-03-05 899 			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
e380bebe477154 Mel Gorman 2019-03-05 900 				low_pfn = end_pfn;
9df41314390b81 Alex Shi 2020-12-15 901 				page = NULL;
e380bebe477154 Mel Gorman 2019-03-05 902 				goto isolate_abort;
e380bebe477154 Mel Gorman 2019-03-05 903 			}
bb13ffeb9f6bfe Mel Gorman 2012-10-08 904 			valid_page = page;
e380bebe477154 Mel Gorman 2019-03-05 905 		}
bb13ffeb9f6bfe Mel Gorman 2012-10-08 906
c122b2087ab941 Joonsoo Kim 2014-04-07 907 		/*
99c0fd5e51c447 Vlastimil Babka 2014-10-09 908 		 * Skip if free. We read page order here without zone lock
99c0fd5e51c447 Vlastimil Babka 2014-10-09 909 		 * which is generally unsafe, but the race window is small and
99c0fd5e51c447 Vlastimil Babka 2014-10-09 910 		 * the worst thing that can happen is that we skip some
99c0fd5e51c447 Vlastimil Babka 2014-10-09 911 		 * potential isolation targets.
99c0fd5e51c447 Vlastimil Babka 2014-10-09 912 		 */
99c0fd5e51c447 Vlastimil Babka 2014-10-09 913 		if (PageBuddy(page)) {
ab130f9108dcf2 Matthew Wilcox (Oracle 2020-10-15 914) 			unsigned long freepage_order = buddy_order_unsafe(page);
99c0fd5e51c447 Vlastimil Babka 2014-10-09 915
99c0fd5e51c447 Vlastimil Babka 2014-10-09 916 			/*
99c0fd5e51c447 Vlastimil Babka 2014-10-09 917 			 * Without lock, we cannot be sure that what we got is
99c0fd5e51c447 Vlastimil Babka 2014-10-09 918 			 * a valid page order. Consider only values in the
99c0fd5e51c447 Vlastimil Babka 2014-10-09 919 			 * valid order range to prevent low_pfn overflow.
c122b2087ab941 Joonsoo Kim 2014-04-07 920 			 */
99c0fd5e51c447 Vlastimil Babka 2014-10-09 921 			if (freepage_order > 0 && freepage_order < MAX_ORDER)
99c0fd5e51c447 Vlastimil Babka 2014-10-09 922 				low_pfn += (1UL << freepage_order) - 1;
c122b2087ab941 Joonsoo Kim 2014-04-07 923 			continue;
99c0fd5e51c447 Vlastimil Babka 2014-10-09 924 		}
9927af740b1b9b Mel Gorman 2011-01-13 925
bc835011afbea3 Andrea Arcangeli 2011-01-13 926 		/*
29c0dde830f8c0 Vlastimil Babka 2015-09-08 927 		 * Regardless of being on LRU, compound pages such as THP and
1da2f328fa643b Rik van Riel 2020-04-01 928 		 * hugetlbfs are not to be compacted unless we are attempting
1da2f328fa643b Rik van Riel 2020-04-01 929 		 * an allocation much larger than the huge page size (eg CMA).
1da2f328fa643b Rik van Riel 2020-04-01 930 		 * We can potentially save a lot of iterations if we skip them
1da2f328fa643b Rik van Riel 2020-04-01 931 		 * at once. The check is racy, but we can consider only valid
1da2f328fa643b Rik van Riel 2020-04-01 932 		 * values and the only danger is skipping too much.
bc835011afbea3 Andrea Arcangeli 2011-01-13 933 		 */
1da2f328fa643b Rik van Riel 2020-04-01 934 		if (PageCompound(page) && !cc->alloc_contig) {
21dc7e023611fb David Rientjes 2017-11-17 935 			const unsigned int order = compound_order(page);
edc2ca61249679 Vlastimil Babka 2014-10-09 936
d3c85bad89b915 Vlastimil Babka 2017-11-17 937 			if (likely(order < MAX_ORDER))
21dc7e023611fb David Rientjes 2017-11-17 938 				low_pfn += (1UL << order) - 1;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 939 			goto isolate_fail;
2a1402aa044b55 Mel Gorman 2012-10-08 940 		}
2a1402aa044b55 Mel Gorman 2012-10-08 941
bda807d4445414 Minchan Kim 2016-07-26 942 		/*
bda807d4445414 Minchan Kim 2016-07-26 943 		 * Check may be lockless but that's ok as we recheck later.
bda807d4445414 Minchan Kim 2016-07-26 944 		 * It's possible to migrate LRU and non-lru movable pages.
bda807d4445414 Minchan Kim 2016-07-26 945 		 * Skip any other type of page
bda807d4445414 Minchan Kim 2016-07-26 946 		 */
bda807d4445414 Minchan Kim 2016-07-26 947 		if (!PageLRU(page)) {
bda807d4445414 Minchan Kim 2016-07-26 948 			/*
bda807d4445414 Minchan Kim 2016-07-26 949 			 * __PageMovable can return false positive so we need
bda807d4445414 Minchan Kim 2016-07-26 950 			 * to verify it under page_lock.
bda807d4445414 Minchan Kim 2016-07-26 951 			 */
bda807d4445414 Minchan Kim 2016-07-26 952 			if (unlikely(__PageMovable(page)) &&
bda807d4445414 Minchan Kim 2016-07-26 953 					!PageIsolated(page)) {
bda807d4445414 Minchan Kim 2016-07-26 954 				if (locked) {
6168d0da2b479c Alex Shi 2020-12-15 955 					unlock_page_lruvec_irqrestore(locked, flags);
6168d0da2b479c Alex Shi 2020-12-15 956 					locked = NULL;
bda807d4445414 Minchan Kim 2016-07-26 957 				}
bda807d4445414 Minchan Kim 2016-07-26 958
9e5bcd610ffced Yisheng Xie 2017-02-24 959 				if (!isolate_movable_page(page, isolate_mode))
bda807d4445414 Minchan Kim 2016-07-26 960 					goto isolate_success;
bda807d4445414 Minchan Kim 2016-07-26 961 			}
bda807d4445414 Minchan Kim 2016-07-26 962
fdd048e12c9a46 Vlastimil Babka 2016-05-19 963 			goto isolate_fail;
bda807d4445414 Minchan Kim 2016-07-26 964 		}
29c0dde830f8c0 Vlastimil Babka 2015-09-08 965
119d6d59dcc098 David Rientjes 2014-04-03 966 		/*
119d6d59dcc098 David Rientjes 2014-04-03 967 		 * Migration will fail if an anonymous page is pinned in memory,
119d6d59dcc098 David Rientjes 2014-04-03 968 		 * so avoid taking lru_lock and isolating it unnecessarily in an
119d6d59dcc098 David Rientjes 2014-04-03 969 		 * admittedly racy check.
119d6d59dcc098 David Rientjes 2014-04-03 970 		 */
119d6d59dcc098 David Rientjes 2014-04-03 971 		if (!page_mapping(page) &&
119d6d59dcc098 David Rientjes 2014-04-03 972 				page_count(page) > page_mapcount(page))
fdd048e12c9a46 Vlastimil Babka 2016-05-19 973 			goto isolate_fail;
119d6d59dcc098 David Rientjes 2014-04-03 974
73e64c51afc56d Michal Hocko 2016-12-14 975 		/*
73e64c51afc56d Michal Hocko 2016-12-14 976 		 * Only allow to migrate anonymous pages in GFP_NOFS context
73e64c51afc56d Michal Hocko 2016-12-14 977 		 * because those do not depend on fs locks.
73e64c51afc56d Michal Hocko 2016-12-14 978 		 */
73e64c51afc56d Michal Hocko 2016-12-14 979 		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
73e64c51afc56d Michal Hocko 2016-12-14 980 			goto isolate_fail;
73e64c51afc56d Michal Hocko 2016-12-14 981
9df41314390b81 Alex Shi 2020-12-15 982 		/*
9df41314390b81 Alex Shi 2020-12-15 983 		 * Be careful not to clear PageLRU until after we're
9df41314390b81 Alex Shi 2020-12-15 984 		 * sure the page is not being freed elsewhere -- the
9df41314390b81 Alex Shi 2020-12-15 985 		 * page release code relies on it.
9df41314390b81 Alex Shi 2020-12-15 986 		 */
9df41314390b81 Alex Shi 2020-12-15 987 		if (unlikely(!get_page_unless_zero(page)))
9df41314390b81 Alex Shi 2020-12-15 988 			goto isolate_fail;
9df41314390b81 Alex Shi 2020-12-15 989
c2135f7c570bc2 Alex Shi 2021-02-24 990 		if (!__isolate_lru_page_prepare(page, isolate_mode))
9df41314390b81 Alex Shi 2020-12-15 991 			goto isolate_fail_put;
9df41314390b81 Alex Shi 2020-12-15 992
9df41314390b81 Alex Shi 2020-12-15 993 		/* Try isolate the page */
9df41314390b81 Alex Shi 2020-12-15 994 		if (!TestClearPageLRU(page))
9df41314390b81 Alex Shi 2020-12-15 995 			goto isolate_fail_put;
9df41314390b81 Alex Shi 2020-12-15 996
6168d0da2b479c Alex Shi 2020-12-15 997 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
6168d0da2b479c Alex Shi 2020-12-15 998
69b7189f12e006 Vlastimil Babka 2014-10-09 999 		/* If we already hold the lock, we can skip some rechecking */
6168d0da2b479c Alex Shi 2020-12-15 1000 		if (lruvec != locked) {
6168d0da2b479c Alex Shi 2020-12-15 1001 			if (locked)
6168d0da2b479c Alex Shi 2020-12-15 1002 				unlock_page_lruvec_irqrestore(locked, flags);
6168d0da2b479c Alex Shi 2020-12-15 1003
6168d0da2b479c Alex Shi 2020-12-15 1004 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
6168d0da2b479c Alex Shi 2020-12-15 1005 			locked = lruvec;
6168d0da2b479c Alex Shi 2020-12-15 1006
6168d0da2b479c Alex Shi 2020-12-15 1007 			lruvec_memcg_debug(lruvec, page);
e380bebe477154 Mel Gorman 2019-03-05 1008
e380bebe477154 Mel Gorman 2019-03-05 1009 			/* Try get exclusive access under lock */
e380bebe477154 Mel Gorman 2019-03-05 1010 			if (!skip_updated) {
e380bebe477154 Mel Gorman 2019-03-05 1011 				skip_updated = true;
e380bebe477154 Mel Gorman 2019-03-05 1012 				if (test_and_set_skip(cc, page, low_pfn))
e380bebe477154 Mel Gorman 2019-03-05 1013 					goto isolate_abort;
e380bebe477154 Mel Gorman 2019-03-05 1014 			}
2a1402aa044b55 Mel Gorman 2012-10-08 1015
29c0dde830f8c0 Vlastimil Babka 2015-09-08 1016 			/*
29c0dde830f8c0 Vlastimil Babka 2015-09-08 1017 			 * Page become compound since the non-locked check,
29c0dde830f8c0 Vlastimil Babka 2015-09-08 1018 			 * and it's on LRU. It can only be a THP so the order
29c0dde830f8c0 Vlastimil Babka 2015-09-08 1019 			 * is safe to read and it's 0 for tail pages.
29c0dde830f8c0 Vlastimil Babka 2015-09-08 1020 			 */
1da2f328fa643b Rik van Riel 2020-04-01 1021 			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
d8c6546b1aea84 Matthew Wilcox (Oracle 2019-09-23 1022) 				low_pfn += compound_nr(page) - 1;
9df41314390b81 Alex Shi 2020-12-15 1023 				SetPageLRU(page);
9df41314390b81 Alex Shi 2020-12-15 1024 				goto isolate_fail_put;
bc835011afbea3 Andrea Arcangeli 2011-01-13 1025 			}
d99fd5feb0ac1d Alex Shi 2021-02-24 1026 		}
fa9add641b1b1c Hugh Dickins 2012-05-29 1027
1da2f328fa643b Rik van Riel 2020-04-01 1028 		/* The whole page is taken off the LRU; skip the tail pages. */
1da2f328fa643b Rik van Riel 2020-04-01 1029 		if (PageCompound(page))
1da2f328fa643b Rik van Riel 2020-04-01 1030 			low_pfn += compound_nr(page) - 1;
bc835011afbea3 Andrea Arcangeli 2011-01-13 1031
748446bb6b5a93 Mel Gorman 2010-05-24 1032 		/* Successfully isolated */
46ae6b2cc2a479 Yu Zhao 2021-02-24 1033 		del_page_from_lru_list(page, lruvec);
1da2f328fa643b Rik van Riel 2020-04-01 1034 		mod_node_page_state(page_pgdat(page),
9de4f22a60f731 Huang Ying 2020-04-06 1035 				NR_ISOLATED_ANON + page_is_file_lru(page),
6c357848b44b40 Matthew Wilcox (Oracle 2020-08-14 1036) 				thp_nr_pages(page));
b6c750163c0d13 Joonsoo Kim 2014-04-07 1037
b6c750163c0d13 Joonsoo Kim 2014-04-07 1038 isolate_success:
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1039 		list_add(&page->lru, &cc->migratepages);
38935861d85a4d Zi Yan 2020-11-13 1040 		cc->nr_migratepages += compound_nr(page);
38935861d85a4d Zi Yan 2020-11-13 1041 		nr_isolated += compound_nr(page);
748446bb6b5a93 Mel Gorman 2010-05-24 1042
804d3121ba5f03 Mel Gorman 2019-03-05 1043 		/*
804d3121ba5f03 Mel Gorman 2019-03-05 1044 		 * Avoid isolating too much unless this block is being
cb2dcaf023c2cf Mel Gorman 2019-03-05 1045 		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
cb2dcaf023c2cf Mel Gorman 2019-03-05 1046 		 * or a lock is contended. For contention, isolate quickly to
cb2dcaf023c2cf Mel Gorman 2019-03-05 1047 		 * potentially remove one source of contention.
804d3121ba5f03 Mel Gorman 2019-03-05 1048 		 */
38935861d85a4d Zi Yan 2020-11-13 1049 		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
cb2dcaf023c2cf Mel Gorman 2019-03-05 1050 		    !cc->rescan && !cc->contended) {
31b8384a555d94 Hillf Danton 2012-01-10 1051 			++low_pfn;
748446bb6b5a93 Mel Gorman 2010-05-24 1052 			break;
748446bb6b5a93 Mel Gorman 2010-05-24 1053 		}
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1054
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1055 		continue;
9df41314390b81 Alex Shi 2020-12-15 1056
9df41314390b81 Alex Shi 2020-12-15 1057 isolate_fail_put:
9df41314390b81 Alex Shi 2020-12-15 1058 		/* Avoid potential deadlock in freeing page under lru_lock */
9df41314390b81 Alex Shi 2020-12-15 1059 		if (locked) {
6168d0da2b479c Alex Shi 2020-12-15 1060 			unlock_page_lruvec_irqrestore(locked, flags);
6168d0da2b479c Alex Shi 2020-12-15 1061 			locked = NULL;
9df41314390b81 Alex Shi 2020-12-15 1062 		}
9df41314390b81 Alex Shi 2020-12-15 1063 		put_page(page);
9df41314390b81 Alex Shi 2020-12-15 1064
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1065 isolate_fail:
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1066 		if (!skip_on_failure)
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1067 			continue;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1068
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1069 		/*
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1070 		 * We have isolated some pages, but then failed. Release them
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1071 		 * instead of migrating, as we cannot form the cc->order buddy
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1072 		 * page anyway.
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1073 		 */
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1074 		if (nr_isolated) {
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1075 			if (locked) {
6168d0da2b479c Alex Shi 2020-12-15 1076 				unlock_page_lruvec_irqrestore(locked, flags);
6168d0da2b479c Alex Shi 2020-12-15 1077 				locked = NULL;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1078 			}
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1079 			putback_movable_pages(&cc->migratepages);
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1080 			cc->nr_migratepages = 0;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1081 			nr_isolated = 0;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1082 		}
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1083
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1084 		if (low_pfn < next_skip_pfn) {
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1085 			low_pfn = next_skip_pfn - 1;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1086 			/*
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1087 			 * The check near the loop beginning would have updated
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1088 			 * next_skip_pfn too, but this is a bit simpler.
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1089 			 */
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1090 			next_skip_pfn += 1UL << cc->order;
fdd048e12c9a46 Vlastimil Babka 2016-05-19 1091 		}
31b8384a555d94 Hillf Danton 2012-01-10 1092 	}
748446bb6b5a93 Mel Gorman 2010-05-24 1093
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1094 	/*
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1095 	 * The PageBuddy() check could have potentially brought us outside
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1096 	 * the range to be scanned.
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1097 	 */
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1098 	if (unlikely(low_pfn > end_pfn))
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1099 		low_pfn = end_pfn;
99c0fd5e51c447 Vlastimil Babka 2014-10-09 1100
9df41314390b81 Alex Shi 2020-12-15 1101 	page = NULL;
9df41314390b81 Alex Shi 2020-12-15 1102
e380bebe477154 Mel Gorman 2019-03-05 1103 isolate_abort:
c67fe3752abe6a Mel Gorman 2012-08-21 1104 	if (locked)
6168d0da2b479c Alex Shi 2020-12-15 1105 		unlock_page_lruvec_irqrestore(locked, flags);
9df41314390b81 Alex Shi 2020-12-15 1106 	if (page) {
9df41314390b81 Alex Shi 2020-12-15 1107 		SetPageLRU(page);
9df41314390b81 Alex Shi 2020-12-15 1108 		put_page(page);
9df41314390b81 Alex Shi 2020-12-15 1109 	}
748446bb6b5a93 Mel Gorman 2010-05-24 1110
50b5b094e683f8 Vlastimil Babka 2014-01-21 1111 	/*
804d3121ba5f03 Mel Gorman 2019-03-05 1112 	 * Updated the cached scanner pfn once the pageblock has been scanned
804d3121ba5f03 Mel Gorman 2019-03-05 1113 	 * Pages will either be migrated in which case there is no point
804d3121ba5f03 Mel Gorman 2019-03-05 1114 	 * scanning in the near future or migration failed in which case the
804d3121ba5f03 Mel Gorman 2019-03-05 1115 	 * failure reason may persist. The block is marked for skipping if
804d3121ba5f03 Mel Gorman 2019-03-05 1116 	 * there were no pages isolated in the block or if the block is
804d3121ba5f03 Mel Gorman 2019-03-05 1117 	 * rescanned twice in a row.
50b5b094e683f8 Vlastimil Babka 2014-01-21 1118 	 */
804d3121ba5f03 Mel Gorman 2019-03-05 1119 	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
e380bebe477154 Mel Gorman 2019-03-05 1120 		if (valid_page && !skip_updated)
e380bebe477154 Mel Gorman 2019-03-05 1121 			set_pageblock_skip(valid_page);
e380bebe477154 Mel Gorman 2019-03-05 1122 		update_cached_migrate(cc, low_pfn);
e380bebe477154 Mel Gorman 2019-03-05 1123 	}
bb13ffeb9f6bfe Mel Gorman 2012-10-08 1124
e34d85f0e3c60f Joonsoo Kim 2015-02-11 1125 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
e34d85f0e3c60f Joonsoo Kim 2015-02-11 1126 						nr_scanned, nr_isolated);
b7aba6984dc048 Mel Gorman 2011-01-13 1127
670105a25608af Mel Gorman 2019-08-02 1128 fatal_pending:
7f354a548d1cb6 David Rientjes 2017-02-22 1129 	cc->total_migrate_scanned += nr_scanned;
397487db696cae Mel Gorman 2012-10-19 1130 	if (nr_isolated)
010fc29a45a2e8 Minchan Kim 2012-12-20 1131 		count_compact_events(COMPACTISOLATED, nr_isolated);
397487db696cae Mel Gorman 2012-10-19 1132
2fe86e00040761 Michal Nazarewicz 2012-01-30 1133 	return low_pfn;
2fe86e00040761 Michal Nazarewicz 2012-01-30 1134 }
2fe86e00040761 Michal Nazarewicz 2012-01-30 1135

:::::: The code at line 799 was first introduced by commit
:::::: edc2ca61249679298c1f343cd9c549964b8df4b4 mm, compaction: move pageblock checks up from isolate_migratepages_range()

:::::: TO: Vlastimil Babka <vbabka@xxxxxxx>
:::::: CC: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz
Description: application/gzip
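
A note on the second warning, for anyone picking this up: the 2048-byte limit that -Wframe-larger-than enforces is taken from CONFIG_FRAME_WARN in the kernel configuration, so the report says clang computed a 2080-byte stack frame for isolate_migratepages_block() under this particular randconfig. Below is a minimal sketch of how one might confirm the configured limit and re-trigger only this diagnostic; it assumes the reproduce steps above have already been run and the attached .config has been saved into the build tree (the single-object mm/compaction.o target is ordinary kbuild usage, not something specific to this report):

        # CONFIG_FRAME_WARN sets the -Wframe-larger-than= threshold reported above
        grep CONFIG_FRAME_WARN= .config
        # rebuild just the affected object; the frame-size warning should reappear
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=powerpc64 W=1 mm/compaction.o

Whether the overshoot is best addressed in the source or treated as an artifact of this randconfig and compiler combination is a judgement for the mm maintainers; the commands above only reproduce the observation.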