tree:   git://git.cmpxchg.org/linux-mmotm.git master
head:   6f11685c34f638e200dd9e821491584ef5717d57
commit: 91c106f5d623b94305af3fd91113de1cba768d73 [124/234] mm/vmalloc: hugepage vmalloc mappings
config: arm64-allyesconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 7.4.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 91c106f5d623b94305af3fd91113de1cba768d73
        # save the attached .config to the linux build tree
        GCC_VERSION=7.4.0 make.cross ARCH=arm64

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

   mm/vmalloc.c: In function 'vmap_range':
   mm/vmalloc.c:325:19: error: 'start' undeclared (first use in this function); did you mean 'stat'?
      flush_cache_vmap(start, end);
                       ^~~~~
                       stat
   mm/vmalloc.c:325:19: note: each undeclared identifier is reported only once for each function it appears in
   mm/vmalloc.c: In function 'vmalloc_to_page':
>> mm/vmalloc.c:520:6: error: implicit declaration of function 'p4d_large'; did you mean 'p4d_page'? [-Werror=implicit-function-declaration]
     if (p4d_large(*p4d))
         ^~~~~~~~~
         p4d_page
>> mm/vmalloc.c:530:6: error: implicit declaration of function 'pud_large'; did you mean 'pud_page'? [-Werror=implicit-function-declaration]
     if (pud_large(*pud))
         ^~~~~~~~~
         pud_page
>> mm/vmalloc.c:540:6: error: implicit declaration of function 'pmd_large'; did you mean 'pmd_page'? [-Werror=implicit-function-declaration]
     if (pmd_large(*pmd))
         ^~~~~~~~~
         pmd_page
   cc1: some warnings being treated as errors
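The first error looks like a straightforward typo in the new vmap_range(): the function's parameter is named 'addr', but the cache flush uses 'start' (GCC's 'stat' suggestion is a red herring). A minimal sketch of the likely fix, assuming the intent is to flush the freshly mapped range ('addr' is passed by value, so it still holds the start of the range at this point):

int vmap_range(unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		unsigned int max_page_shift)
{
	int ret;

	ret = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
	/* was flush_cache_vmap(start, end); no 'start' exists in this scope */
	flush_cache_vmap(addr, end);
	return ret;
}

Renaming the parameter to 'start', matching vmap_pages_range() below, would work equally well.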
vim +520 mm/vmalloc.c

   317	
   318	int vmap_range(unsigned long addr,
   319			unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
   320			unsigned int max_page_shift)
   321	{
   322		int ret;
   323	
   324		ret = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 > 325		flush_cache_vmap(start, end);
   326		return ret;
   327	}
   328	
   329	static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
   330			unsigned long end, pgprot_t prot, struct page **pages, int *nr)
   331	{
   332		pte_t *pte;
   333	
   334		/*
   335		 * nr is a running index into the array which helps higher level
   336		 * callers keep track of where we're up to.
   337		 */
   338	
   339		pte = pte_alloc_kernel(pmd, addr);
   340		if (!pte)
   341			return -ENOMEM;
   342		do {
   343			struct page *page = pages[*nr];
   344	
   345			if (WARN_ON(!pte_none(*pte)))
   346				return -EBUSY;
   347			if (WARN_ON(!page))
   348				return -ENOMEM;
   349			set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
   350			(*nr)++;
   351		} while (pte++, addr += PAGE_SIZE, addr != end);
   352		return 0;
   353	}
   354	
   355	static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
   356			unsigned long end, pgprot_t prot, struct page **pages, int *nr)
   357	{
   358		pmd_t *pmd;
   359		unsigned long next;
   360	
   361		pmd = pmd_alloc(&init_mm, pud, addr);
   362		if (!pmd)
   363			return -ENOMEM;
   364		do {
   365			next = pmd_addr_end(addr, end);
   366			if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr))
   367				return -ENOMEM;
   368		} while (pmd++, addr = next, addr != end);
   369		return 0;
   370	}
   371	
   372	static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
   373			unsigned long end, pgprot_t prot, struct page **pages, int *nr)
   374	{
   375		pud_t *pud;
   376		unsigned long next;
   377	
   378		pud = pud_alloc(&init_mm, p4d, addr);
   379		if (!pud)
   380			return -ENOMEM;
   381		do {
   382			next = pud_addr_end(addr, end);
   383			if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr))
   384				return -ENOMEM;
   385		} while (pud++, addr = next, addr != end);
   386		return 0;
   387	}
   388	
   389	static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
   390			unsigned long end, pgprot_t prot, struct page **pages, int *nr)
   391	{
   392		p4d_t *p4d;
   393		unsigned long next;
   394	
   395		p4d = p4d_alloc(&init_mm, pgd, addr);
   396		if (!p4d)
   397			return -ENOMEM;
   398		do {
   399			next = p4d_addr_end(addr, end);
   400			if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr))
   401				return -ENOMEM;
   402		} while (p4d++, addr = next, addr != end);
   403		return 0;
   404	}
   405	
   406	/*
   407	 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
   408	 * will have pfns corresponding to the "pages" array.
   409	 *
   410	 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
   411	 */
   412	static int vmap_pages_range_noflush(unsigned long start, unsigned long end,
   413					   pgprot_t prot, struct page **pages)
   414	{
   415		pgd_t *pgd;
   416		unsigned long next;
   417		unsigned long addr = start;
   418		int err = 0;
   419		int nr = 0;
   420	
   421		BUG_ON(addr >= end);
   422		pgd = pgd_offset_k(addr);
   423		do {
   424			next = pgd_addr_end(addr, end);
   425			err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr);
   426			if (err)
   427				return err;
   428		} while (pgd++, addr = next, addr != end);
   429	
   430		return nr;
   431	}
   432	
   433	static int vmap_pages_range(unsigned long start, unsigned long end,
   434				   pgprot_t prot, struct page **pages)
   435	{
   436		int ret;
   437	
   438		ret = vmap_pages_range_noflush(start, end, prot, pages);
   439		flush_cache_vmap(start, end);
   440		return ret;
   441	}
   442	
   443	#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
   444	static int vmap_hpages_range(unsigned long start, unsigned long end,
   445				   pgprot_t prot, struct page **pages,
   446				   unsigned int page_shift)
   447	{
   448		unsigned long addr = start;
   449		unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
   450	
   451		for (i = 0; i < nr; i++) {
   452			int err;
   453	
   454			err = vmap_range_noflush(addr,
   455						addr + (PAGE_SIZE << page_shift),
   456						__pa(page_address(pages[i])), prot,
   457						page_shift);
   458			if (err)
   459				return err;
   460	
   461			addr += PAGE_SIZE << page_shift;
   462		}
   463		flush_cache_vmap(start, end);
   464	
   465		return nr;
   466	}
   467	#else
   468	static int vmap_hpages_range(unsigned long start, unsigned long end,
   469				   pgprot_t prot, struct page **pages,
   470				   unsigned int page_shift)
   471	{
   472		BUG_ON(page_shift != PAGE_SIZE);
   473		return vmap_pages_range(start, end, prot, pages);
   474	}
   475	#endif
   476	
   477	
   478	int is_vmalloc_or_module_addr(const void *x)
   479	{
   480		/*
   481		 * ARM, x86-64 and sparc64 put modules in a special place,
   482		 * and fall back on vmalloc() if that fails. Others
   483		 * just put it in the vmalloc space.
   484		 */
   485	#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
   486		unsigned long addr = (unsigned long)x;
   487		if (addr >= MODULES_VADDR && addr < MODULES_END)
   488			return 1;
   489	#endif
   490		return is_vmalloc_addr(x);
   491	}
   492	
   493	/*
   494	 * Walk a vmap address to the struct page it maps.
   495	 */
   496	struct page *vmalloc_to_page(const void *vmalloc_addr)
   497	{
   498		unsigned long addr = (unsigned long) vmalloc_addr;
   499		struct page *page = NULL;
   500		pgd_t *pgd;
   501		p4d_t *p4d;
   502		pud_t *pud;
   503		pmd_t *pmd;
   504		pte_t *ptep, pte;
   505	
   506		/*
   507		 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
   508		 * architectures that do not vmalloc module space
   509		 */
   510		VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
   511	
   512		pgd = pgd_offset_k(addr);
   513		if (pgd_none(*pgd))
   514			return NULL;
   515	
   516		p4d = p4d_offset(pgd, addr);
   517		if (p4d_none(*p4d))
   518			return NULL;
   519	#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 > 520		if (p4d_large(*p4d))
   521			return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
   522	#endif
   523		if (WARN_ON_ONCE(p4d_bad(*p4d)))
   524			return NULL;
   525	
   526		pud = pud_offset(p4d, addr);
   527		if (pud_none(*pud))
   528			return NULL;
   529	#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 > 530		if (pud_large(*pud))
   531			return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
   532	#endif
   533		if (WARN_ON_ONCE(pud_bad(*pud)))
   534			return NULL;
   535	
   536		pmd = pmd_offset(pud, addr);
   537		if (pmd_none(*pmd))
   538			return NULL;
   539	#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 > 540		if (pmd_large(*pmd))
   541			return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
   542	#endif
   543		if (WARN_ON_ONCE(pmd_bad(*pmd)))
   544			return NULL;
   545	
   546		ptep = pte_offset_map(pmd, addr);
   547		pte = *ptep;
   548		if (pte_present(pte))
   549			page = pte_page(pte);
   550		pte_unmap(ptep);
   551	
   552		return page;
   553	}
   554	EXPORT_SYMBOL(vmalloc_to_page);
   555	
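The three remaining errors are an architecture-coverage problem rather than a typo: p4d_large(), pud_large() and pmd_large() are only defined by some architectures (x86 and s390 among them), while arm64 selects CONFIG_HAVE_ARCH_HUGE_VMAP without providing them, so the new huge-page branches in vmalloc_to_page() hit implicit declarations. One possible direction, sketched below, is a set of generic zero fallbacks in a common pgtable header; this assumes each architecture that does implement a helper also exposes it to the preprocessor (e.g. '#define pmd_large pmd_large' next to an inline definition), and arm64 would still need real definitions based on its section mappings for the huge-page path to actually work there:

/*
 * Hypothetical generic fallbacks: architectures that never create
 * large (leaf) entries at a given level can treat the test as
 * constant-false, so the huge-page branches compile away.
 */
#ifndef p4d_large
#define p4d_large(p4d)	0
#endif
#ifndef pud_large
#define pud_large(pud)	0
#endif
#ifndef pmd_large
#define pmd_large(pmd)	0
#endif

This is the same #ifndef-with-zero-stub pattern the kernel later adopted for the generic p?d_leaf() helpers.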
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip