tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   295ef6ab95e1d8e09e027ef260c50d662a382ef6
commit: cc35cad087f04dd8fa911fbe55c8e910137fc6c1 [3283/3365] huge tmpfs: fix Mlocked meminfo, track huge & unhuge mlocks
config: x86_64-randconfig-v0-04071631 (attached as .config)
reproduce:
        git checkout cc35cad087f04dd8fa911fbe55c8e910137fc6c1
        # save the attached .config to linux build tree
        make ARCH=x86_64

All warnings (new ones prefixed by >>):

   In file included from include/asm-generic/bug.h:4:0,
                    from arch/x86/include/asm/bug.h:35,
                    from include/linux/bug.h:4,
                    from include/linux/mmdebug.h:4,
                    from include/linux/mm.h:8,
                    from mm/rmap.c:48:
   mm/rmap.c: In function 'try_to_unmap_one':
   include/linux/compiler.h:510:38: error: call to '__compiletime_assert_1447' declared with attribute error: BUILD_BUG failed
     _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
                                         ^
   include/linux/compiler.h:493:4: note: in definition of macro '__compiletime_assert'
       prefix ## suffix(); \
       ^
   include/linux/compiler.h:510:2: note: in expansion of macro '_compiletime_assert'
     _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
     ^
   include/linux/bug.h:51:37: note: in expansion of macro 'compiletime_assert'
    #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
                                        ^
   include/linux/bug.h:85:21: note: in expansion of macro 'BUILD_BUG_ON_MSG'
    #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
                        ^
   include/linux/huge_mm.h:170:28: note: in expansion of macro 'BUILD_BUG'
    #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
                               ^
>> include/linux/huge_mm.h:52:26: note: in expansion of macro 'HPAGE_PMD_SHIFT'
    #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
                             ^
>> include/linux/huge_mm.h:53:26: note: in expansion of macro 'HPAGE_PMD_ORDER'
    #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
                             ^
>> mm/rmap.c:1447:36: note: in expansion of macro 'HPAGE_PMD_NR'
      mlock_vma_pages(page, pte ? 1 : HPAGE_PMD_NR);
                                      ^
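Note on the failure mode: this randconfig has CONFIG_TRANSPARENT_HUGEPAGE=n, where HPAGE_PMD_SHIFT is defined as ({ BUILD_BUG(); 0; }), so HPAGE_PMD_ORDER and HPAGE_PMD_NR are poisoned: any use the optimizer cannot prove unreachable trips the compile-time assertion. The new mlock_vma_pages(page, pte ? 1 : HPAGE_PMD_NR) call in try_to_unmap_one() selects HPAGE_PMD_NR on a runtime condition (pte), so the reference survives even though the pmd-mapped case cannot occur with THP disabled. Below is a minimal standalone sketch of the mechanism, not kernel code and not the fix for this patch; the identifiers (do_build_bug, page_trans_huge, ok_dead_use, bad_reachable_use) are invented for the demo:

/*
 * demo.c: compile with "gcc -O2 -c demo.c".  gcc's error attribute only
 * fires for calls that survive optimization, which is how the kernel's
 * BUILD_BUG()-poisoned HPAGE_PMD_* macros behave when THP is disabled.
 */
extern void do_build_bug(void) __attribute__((error("BUILD_BUG failed")));

#define BUILD_BUG()	do_build_bug()
#define HPAGE_PMD_NR	({ BUILD_BUG(); 0; })	/* poisoned stand-in */

static inline int page_trans_huge(void) { return 0; }	/* always false without THP */

int ok_dead_use(void)
{
	/* Builds: the branch is provably dead at -O2, so the error call is dropped. */
	if (page_trans_huge())
		return HPAGE_PMD_NR;
	return 1;
}

int bad_reachable_use(int pte)
{
	/*
	 * Fails to build: pte is only known at run time, so the call survives,
	 * mirroring "mlock_vma_pages(page, pte ? 1 : HPAGE_PMD_NR)" above.
	 */
	return pte ? 1 : HPAGE_PMD_NR;
}

Guarding the constant behind something the optimizer can fold, e.g. a PageTransHuge() test or IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE), is the usual way such references stay buildable with THP off; how this series actually resolves it is up to the patch author.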
vim +/HPAGE_PMD_ORDER +53 include/linux/huge_mm.h

79da5407e Kirill A. Shutemov  2012-12-12   46  	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
71e3aac07 Andrea Arcangeli    2011-01-13   47  #ifdef CONFIG_DEBUG_VM
71e3aac07 Andrea Arcangeli    2011-01-13   48  	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
71e3aac07 Andrea Arcangeli    2011-01-13   49  #endif
71e3aac07 Andrea Arcangeli    2011-01-13   50  };
71e3aac07 Andrea Arcangeli    2011-01-13   51  
d8c37c480 Naoya Horiguchi     2012-03-21  @52  #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
d8c37c480 Naoya Horiguchi     2012-03-21  @53  #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
d8c37c480 Naoya Horiguchi     2012-03-21   54  
71e3aac07 Andrea Arcangeli    2011-01-13   55  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3565fce3a Dan Williams        2016-01-15   56  struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
3565fce3a Dan Williams        2016-01-15   57  		pmd_t *pmd, int flags);
3565fce3a Dan Williams        2016-01-15   58  
fde52796d Aneesh Kumar K.V    2013-06-05   59  #define HPAGE_PMD_SHIFT PMD_SHIFT
fde52796d Aneesh Kumar K.V    2013-06-05   60  #define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
fde52796d Aneesh Kumar K.V    2013-06-05   61  #define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
71e3aac07 Andrea Arcangeli    2011-01-13   62  
209959740 Alex Shi            2012-05-29   63  extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
209959740 Alex Shi            2012-05-29   64  
71e3aac07 Andrea Arcangeli    2011-01-13   65  #define transparent_hugepage_enabled(__vma)				\
a664b2d85 Andrea Arcangeli    2011-01-13   66  	((transparent_hugepage_flags &					\
a664b2d85 Andrea Arcangeli    2011-01-13   67  	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
71e3aac07 Andrea Arcangeli    2011-01-13   68  	  (transparent_hugepage_flags &					\
71e3aac07 Andrea Arcangeli    2011-01-13   69  	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
a664b2d85 Andrea Arcangeli    2011-01-13   70  	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
a7d6e4ecd Andrea Arcangeli    2011-02-15   71  	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
a7d6e4ecd Andrea Arcangeli    2011-02-15   72  	 !is_vma_temporary_stack(__vma))
79da5407e Kirill A. Shutemov  2012-12-12   73  #define transparent_hugepage_use_zero_page()				\
79da5407e Kirill A. Shutemov  2012-12-12   74  	(transparent_hugepage_flags &					\
79da5407e Kirill A. Shutemov  2012-12-12   75  	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
71e3aac07 Andrea Arcangeli    2011-01-13   76  #ifdef CONFIG_DEBUG_VM
71e3aac07 Andrea Arcangeli    2011-01-13   77  #define transparent_hugepage_debug_cow()				\
71e3aac07 Andrea Arcangeli    2011-01-13   78  	(transparent_hugepage_flags &					\
71e3aac07 Andrea Arcangeli    2011-01-13   79  	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
71e3aac07 Andrea Arcangeli    2011-01-13   80  #else /* CONFIG_DEBUG_VM */
71e3aac07 Andrea Arcangeli    2011-01-13   81  #define transparent_hugepage_debug_cow() 0
71e3aac07 Andrea Arcangeli    2011-01-13   82  #endif /* CONFIG_DEBUG_VM */
71e3aac07 Andrea Arcangeli    2011-01-13   83  
71e3aac07 Andrea Arcangeli    2011-01-13   84  extern unsigned long transparent_hugepage_flags;
ad0bed24e Kirill A. Shutemov  2016-01-15   85  
9a982250f Kirill A. Shutemov  2016-01-15   86  extern void prep_transhuge_page(struct page *page);
9a982250f Kirill A. Shutemov  2016-01-15   87  extern void free_transhuge_page(struct page *page);
9a982250f Kirill A. Shutemov  2016-01-15   88  
e9b61f198 Kirill A. Shutemov  2016-01-15   89  int split_huge_page_to_list(struct page *page, struct list_head *list);
e9b61f198 Kirill A. Shutemov  2016-01-15   90  static inline int split_huge_page(struct page *page)
e9b61f198 Kirill A. Shutemov  2016-01-15   91  {
e9b61f198 Kirill A. Shutemov  2016-01-15   92  	return split_huge_page_to_list(page, NULL);
e9b61f198 Kirill A. Shutemov  2016-01-15   93  }
9a982250f Kirill A. Shutemov  2016-01-15   94  void deferred_split_huge_page(struct page *page);
eef1b3ba0 Kirill A. Shutemov  2016-01-15   95  
eef1b3ba0 Kirill A. Shutemov  2016-01-15   96  void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
fec89c109 Kirill A. Shutemov  2016-03-17   97  		unsigned long address, bool freeze);
eef1b3ba0 Kirill A. Shutemov  2016-01-15   98  
eef1b3ba0 Kirill A. Shutemov  2016-01-15   99  #define split_huge_pmd(__vma, __pmd, __address)				\
eef1b3ba0 Kirill A. Shutemov  2016-01-15  100  	do {								\
eef1b3ba0 Kirill A. Shutemov  2016-01-15  101  		pmd_t *____pmd = (__pmd);				\
5c7fb56e5 Dan Williams        2016-01-15  102  		if (pmd_trans_huge(*____pmd)				\
5c7fb56e5 Dan Williams        2016-01-15  103  					|| pmd_devmap(*____pmd))	\
fec89c109 Kirill A. Shutemov  2016-03-17  104  			__split_huge_pmd(__vma, __pmd, __address,	\
fec89c109 Kirill A. Shutemov  2016-03-17  105  						false);			\
eef1b3ba0 Kirill A. Shutemov  2016-01-15  106  	} while (0)
ad0bed24e Kirill A. Shutemov  2016-01-15  107  
2a52bcbcc Kirill A. Shutemov  2016-03-17  108  
fec89c109 Kirill A. Shutemov  2016-03-17  109  void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
fec89c109 Kirill A. Shutemov  2016-03-17  110  		bool freeze, struct page *page);
2a52bcbcc Kirill A. Shutemov  2016-03-17  111  
60ab3244e Andrea Arcangeli    2011-01-13  112  extern int hugepage_madvise(struct vm_area_struct *vma,
60ab3244e Andrea Arcangeli    2011-01-13  113  			    unsigned long *vm_flags, int advice);
e1b9996b8 Kirill A. Shutemov  2015-09-08  114  extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
94fcc585f Andrea Arcangeli    2011-01-13  115  				    unsigned long start,
94fcc585f Andrea Arcangeli    2011-01-13  116  				    unsigned long end,
94fcc585f Andrea Arcangeli    2011-01-13  117  				    long adjust_next);
b6ec57f4b Kirill A. Shutemov  2016-01-21  118  extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
b6ec57f4b Kirill A. Shutemov  2016-01-21  119  		struct vm_area_struct *vma);
025c5b245 Naoya Horiguchi     2012-03-21  120  /* mmap_sem must be held on entry */
b6ec57f4b Kirill A. Shutemov  2016-01-21  121  static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
b6ec57f4b Kirill A. Shutemov  2016-01-21  122  		struct vm_area_struct *vma)
025c5b245 Naoya Horiguchi     2012-03-21  123  {
81d1b09c6 Sasha Levin         2014-10-09  124  	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
5c7fb56e5 Dan Williams        2016-01-15  125  	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
b6ec57f4b Kirill A. Shutemov  2016-01-21  126  		return __pmd_trans_huge_lock(pmd, vma);
025c5b245 Naoya Horiguchi     2012-03-21  127  	else
969e8d7e4 Chen Gang           2016-04-01  128  		return NULL;
025c5b245 Naoya Horiguchi     2012-03-21  129  }
40b0a093a Hugh Dickins        2016-04-07  130  
40b0a093a Hugh Dickins        2016-04-07  131  /* Repeat definition from linux/pageteam.h to force error if different */
40b0a093a Hugh Dickins        2016-04-07  132  #define TEAM_LRU_WEIGHT_MASK	((1L << (HPAGE_PMD_ORDER + 1)) - 1)
40b0a093a Hugh Dickins        2016-04-07  133  
40b0a093a Hugh Dickins        2016-04-07  134  /*
40b0a093a Hugh Dickins        2016-04-07  135   * hpage_nr_pages(page) returns the current LRU weight of the page.
40b0a093a Hugh Dickins        2016-04-07  136   * Beware of races when it is used: an Anon THPage might get split,
40b0a093a Hugh Dickins        2016-04-07  137   * so may need protection by compound lock or lruvec lock; a huge tmpfs
40b0a093a Hugh Dickins        2016-04-07  138   * team page might have weight 1 shifted from tail to head, or back to
40b0a093a Hugh Dickins        2016-04-07  139   * tail when disbanded, so may need protection by lruvec lock.
40b0a093a Hugh Dickins        2016-04-07  140   */
2c888cfbc Rik van Riel        2011-01-13  141  static inline int hpage_nr_pages(struct page *page)
2c888cfbc Rik van Riel        2011-01-13  142  {
2c888cfbc Rik van Riel        2011-01-13  143  	if (unlikely(PageTransHuge(page)))
2c888cfbc Rik van Riel        2011-01-13  144  		return HPAGE_PMD_NR;
40b0a093a Hugh Dickins        2016-04-07  145  	if (PageTeam(page))
40b0a093a Hugh Dickins        2016-04-07  146  		return atomic_long_read(&page->team_usage) &
40b0a093a Hugh Dickins        2016-04-07  147  						TEAM_LRU_WEIGHT_MASK;
2c888cfbc Rik van Riel        2011-01-13  148  	return 1;
2c888cfbc Rik van Riel        2011-01-13  149  }
d10e63f29 Mel Gorman          2012-10-25  150  
4daae3b4b Mel Gorman          2012-11-02  151  extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4daae3b4b Mel Gorman          2012-11-02  152  			unsigned long addr, pmd_t pmd, pmd_t *pmdp);
d10e63f29 Mel Gorman          2012-10-25  153  
56873f43a Wang, Yalin         2015-02-11  154  extern struct page *huge_zero_page;
56873f43a Wang, Yalin         2015-02-11  155  
56873f43a Wang, Yalin         2015-02-11  156  static inline bool is_huge_zero_page(struct page *page)
56873f43a Wang, Yalin         2015-02-11  157  {
56873f43a Wang, Yalin         2015-02-11  158  	return ACCESS_ONCE(huge_zero_page) == page;
56873f43a Wang, Yalin         2015-02-11  159  }
56873f43a Wang, Yalin         2015-02-11  160  
fc4370443 Matthew Wilcox      2015-09-08  161  static inline bool is_huge_zero_pmd(pmd_t pmd)
fc4370443 Matthew Wilcox      2015-09-08  162  {
fc4370443 Matthew Wilcox      2015-09-08  163  	return is_huge_zero_page(pmd_page(pmd));
fc4370443 Matthew Wilcox      2015-09-08  164  }
fc4370443 Matthew Wilcox      2015-09-08  165  
fc4370443 Matthew Wilcox      2015-09-08  166  struct page *get_huge_zero_page(void);
0fa423dfb Kirill A. Shutemov  2016-04-07  167  void put_huge_zero_page(void);
fc4370443 Matthew Wilcox      2015-09-08  168  
71e3aac07 Andrea Arcangeli    2011-01-13  169  #else /* CONFIG_TRANSPARENT_HUGEPAGE */
d8c37c480 Naoya Horiguchi     2012-03-21 @170  #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
d8c37c480 Naoya Horiguchi     2012-03-21  171  #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
d8c37c480 Naoya Horiguchi     2012-03-21  172  #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
71e3aac07 Andrea Arcangeli    2011-01-13  173  

:::::: The code at line 53 was first introduced by commit
:::::: d8c37c480678ebe09bc570f33e085e28049db035 thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE

:::::: TO: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
:::::: CC: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
Attachment: .config.gz (binary data)