Hi Keith, thank you for your patch! However, there is something to improve: [auto build test ERROR on linus/master] [also build test ERROR on v4.19-rc4 next-20180919] [if your patch is applied to the wrong git tree, please drop us a note to help improve the system] url: https://github.com/0day-ci/linux/commits/Keith-Busch/mm-faster-get-user-pages/20180920-184931 config: arm-oxnas_v6_defconfig (attached as .config) compiler: arm-linux-gnueabi-gcc (Debian 7.2.0-11) 7.2.0 reproduce: wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # save the attached .config to linux build tree GCC_VERSION=7.2.0 make.cross ARCH=arm All errors (new ones prefixed by >>): In file included from include/linux/mm.h:506:0, from mm/gup.c:6: include/linux/huge_mm.h:344:53: warning: 'struct gup_context' declared inside parameter list will not be visible outside of this definition or declaration static inline struct page *follow_devmap_pmd(struct gup_context *ctx, pmd_t *pmd) ^~~~~~~~~~~ include/linux/huge_mm.h:349:53: warning: 'struct gup_context' declared inside parameter list will not be visible outside of this definition or declaration static inline struct page *follow_devmap_pud(struct gup_context *ctx, pud_t *pud) ^~~~~~~~~~~ mm/gup.c: In function 'follow_pmd_mask': >> mm/gup.c:233:34: error: macro "follow_huge_pd" passed 5 arguments, but takes just 3 ctx->flags, PGDIR_SHIFT); ^ >> mm/gup.c:231:10: error: 'follow_huge_pd' undeclared (first use in this function); did you mean 'follow_page_pte'? 
page = follow_huge_pd(ctx->vma, ctx->address, ^~~~~~~~~~~~~~ follow_page_pte mm/gup.c:231:10: note: each undeclared identifier is reported only once for each function it appears in >> mm/gup.c:257:28: error: passing argument 1 of 'follow_devmap_pmd' from incompatible pointer type [-Werror=incompatible-pointer-types] page = follow_devmap_pmd(ctx, pmd); ^~~ In file included from include/linux/mm.h:506:0, from mm/gup.c:6: include/linux/huge_mm.h:344:28: note: expected 'struct gup_context *' but argument is of type 'struct follow_page_context *' static inline struct page *follow_devmap_pmd(struct gup_context *ctx, pmd_t *pmd) ^~~~~~~~~~~~~~~~~ mm/gup.c: In function 'follow_pud_mask': mm/gup.c:333:32: error: macro "follow_huge_pd" passed 5 arguments, but takes just 3 ctx->flags, PUD_SHIFT); ^ mm/gup.c:331:10: error: 'follow_huge_pd' undeclared (first use in this function); did you mean 'follow_page_pte'? page = follow_huge_pd(ctx->vma, ctx->address, ^~~~~~~~~~~~~~ follow_page_pte >> mm/gup.c:340:28: error: passing argument 1 of 'follow_devmap_pud' from incompatible pointer type [-Werror=incompatible-pointer-types] page = follow_devmap_pud(ctx, pud); ^~~ In file included from include/linux/mm.h:506:0, from mm/gup.c:6: include/linux/huge_mm.h:349:28: note: expected 'struct gup_context *' but argument is of type 'struct follow_page_context *' static inline struct page *follow_devmap_pud(struct gup_context *ctx, pud_t *pud) ^~~~~~~~~~~~~~~~~ mm/gup.c: In function 'follow_p4d_mask': mm/gup.c:366:32: error: macro "follow_huge_pd" passed 5 arguments, but takes just 3 ctx->flags, P4D_SHIFT); ^ mm/gup.c:364:10: error: 'follow_huge_pd' undeclared (first use in this function); did you mean 'follow_page_pte'? 
page = follow_huge_pd(ctx->vma, ctx->address, ^~~~~~~~~~~~~~ follow_page_pte mm/gup.c: In function 'follow_page_mask': mm/gup.c:414:34: error: macro "follow_huge_pd" passed 5 arguments, but takes just 3 ctx->flags, PGDIR_SHIFT); ^ mm/gup.c:412:10: error: 'follow_huge_pd' undeclared (first use in this function); did you mean 'follow_page_pte'? page = follow_huge_pd(ctx->vma, ctx->address, ^~~~~~~~~~~~~~ follow_page_pte cc1: some warnings being treated as errors vim +/follow_huge_pd +233 mm/gup.c 208 209 static struct page *follow_pmd_mask(struct follow_page_context *ctx, pud_t *pudp) 210 { 211 pmd_t *pmd, pmdval; 212 spinlock_t *ptl; 213 struct page *page; 214 struct mm_struct *mm = ctx->vma->vm_mm; 215 216 pmd = pmd_offset(pudp, ctx->address); 217 /* 218 * The READ_ONCE() will stabilize the pmdval in a register or 219 * on the stack so that it will stop changing under the code. 220 */ 221 pmdval = READ_ONCE(*pmd); 222 if (pmd_none(pmdval)) 223 return no_page_table(ctx); 224 if (pmd_huge(pmdval) && ctx->vma->vm_flags & VM_HUGETLB) { 225 page = follow_huge_pmd(mm, ctx->address, pmd, ctx->flags); 226 if (page) 227 return page; 228 return no_page_table(ctx); 229 } 230 if (is_hugepd(__hugepd(pmd_val(pmdval)))) { > 231 page = follow_huge_pd(ctx->vma, ctx->address, 232 __hugepd(pmd_val(pmdval)), > 233 ctx->flags, PGDIR_SHIFT); 234 if (page) 235 return page; 236 return no_page_table(ctx); 237 } 238 retry: 239 if (!pmd_present(pmdval)) { 240 if (likely(!(ctx->flags & FOLL_MIGRATION))) 241 return no_page_table(ctx); 242 VM_BUG_ON(thp_migration_supported() && 243 !is_pmd_migration_entry(pmdval)); 244 if (is_pmd_migration_entry(pmdval)) 245 pmd_migration_entry_wait(mm, pmd); 246 pmdval = READ_ONCE(*pmd); 247 /* 248 * MADV_DONTNEED may convert the pmd to null because 249 * mmap_sem is held in read mode 250 */ 251 if (pmd_none(pmdval)) 252 return no_page_table(ctx); 253 goto retry; 254 } 255 if (pmd_devmap(pmdval)) { 256 ptl = pmd_lock(mm, pmd); > 257 page = 
follow_devmap_pmd(ctx, pmd); 258 spin_unlock(ptl); 259 if (page) 260 return page; 261 } 262 if (likely(!pmd_trans_huge(pmdval))) 263 return follow_page_pte(ctx, pmd); 264 265 if ((ctx->flags & FOLL_NUMA) && pmd_protnone(pmdval)) 266 return no_page_table(ctx); 267 268 retry_locked: 269 ptl = pmd_lock(mm, pmd); 270 if (unlikely(pmd_none(*pmd))) { 271 spin_unlock(ptl); 272 return no_page_table(ctx); 273 } 274 if (unlikely(!pmd_present(*pmd))) { 275 spin_unlock(ptl); 276 if (likely(!(ctx->flags & FOLL_MIGRATION))) 277 return no_page_table(ctx); 278 pmd_migration_entry_wait(mm, pmd); 279 goto retry_locked; 280 } 281 if (unlikely(!pmd_trans_huge(*pmd))) { 282 spin_unlock(ptl); 283 return follow_page_pte(ctx, pmd); 284 } 285 if (ctx->flags & FOLL_SPLIT) { 286 int ret; 287 page = pmd_page(*pmd); 288 if (is_huge_zero_page(page)) { 289 spin_unlock(ptl); 290 ret = 0; 291 split_huge_pmd(ctx->vma, pmd, ctx->address); 292 if (pmd_trans_unstable(pmd)) 293 ret = -EBUSY; 294 } else { 295 get_page(page); 296 spin_unlock(ptl); 297 lock_page(page); 298 ret = split_huge_page(page); 299 unlock_page(page); 300 put_page(page); 301 if (pmd_none(*pmd)) 302 return no_page_table(ctx); 303 } 304 305 return ret ? 
ERR_PTR(ret) : 306 follow_page_pte(ctx, pmd); 307 } 308 page = follow_trans_huge_pmd(ctx->vma, ctx->address, pmd, ctx->flags); 309 spin_unlock(ptl); 310 ctx->page_mask = HPAGE_PMD_NR - 1; 311 return page; 312 } 313 314 static struct page *follow_pud_mask(struct follow_page_context *ctx, p4d_t *p4dp) 315 { 316 pud_t *pud; 317 spinlock_t *ptl; 318 struct page *page; 319 struct mm_struct *mm = ctx->vma->vm_mm; 320 321 pud = pud_offset(p4dp, ctx->address); 322 if (pud_none(*pud)) 323 return no_page_table(ctx); 324 if (pud_huge(*pud) && ctx->vma->vm_flags & VM_HUGETLB) { 325 page = follow_huge_pud(mm, ctx->address, pud, ctx->flags); 326 if (page) 327 return page; 328 return no_page_table(ctx); 329 } 330 if (is_hugepd(__hugepd(pud_val(*pud)))) { 331 page = follow_huge_pd(ctx->vma, ctx->address, 332 __hugepd(pud_val(*pud)), > 333 ctx->flags, PUD_SHIFT); 334 if (page) 335 return page; 336 return no_page_table(ctx); 337 } 338 if (pud_devmap(*pud)) { 339 ptl = pud_lock(mm, pud); > 340 page = follow_devmap_pud(ctx, pud); 341 spin_unlock(ptl); 342 if (page) 343 return page; 344 } 345 if (unlikely(pud_bad(*pud))) 346 return no_page_table(ctx); 347 348 return follow_pmd_mask(ctx, pud); 349 } 350 --- 0-DAY kernel test infrastructure Open Source Technology Center https://lists.01.org/pipermail/kbuild-all Intel Corporation
Attachment:
.config.gz
Description: application/gzip