tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   e3f80d3eae76c3557b3c9b5938ad01c0e6cf25ec
commit: 5233d9beb1a81c08d8155b62df5e9635ee99ee27 [11986/12208] proc/ksm: add ksm stats to /proc/pid/smaps
config: powerpc-allnoconfig (https://download.01.org/0day-ci/archive/20230823/202308232003.eoFc7Z2h-lkp@xxxxxxxxx/config)
compiler: powerpc-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20230823/202308232003.eoFc7Z2h-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308232003.eoFc7Z2h-lkp@xxxxxxxxx/

Note: the linux-next/master HEAD e3f80d3eae76c3557b3c9b5938ad01c0e6cf25ec builds fine.
      It may have been fixed somewhere.

All errors (new ones prefixed by >>):

   fs/proc/task_mmu.c: In function 'smaps_account':
>> fs/proc/task_mmu.c:457:40: error: implicit declaration of function 'is_ksm_zero_pte' [-Werror=implicit-function-declaration]
     457 |         if (PageKsm(page) && (!pte || !is_ksm_zero_pte(*pte)))
         |                                        ^~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/is_ksm_zero_pte +457 fs/proc/task_mmu.c

   439	
   440	static void smaps_account(struct mem_size_stats *mss, pte_t *pte,
   441			struct page *page, bool compound, bool young, bool dirty,
   442			bool locked, bool migration)
   443	{
   444		int i, nr = compound ? compound_nr(page) : 1;
   445		unsigned long size = nr * PAGE_SIZE;
   446	
   447		/*
   448		 * First accumulate quantities that depend only on |size| and the type
   449		 * of the compound page.
   450		 */
   451		if (PageAnon(page)) {
   452			mss->anonymous += size;
   453			if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
   454				mss->lazyfree += size;
   455		}
   456	
 > 457		if (PageKsm(page) && (!pte || !is_ksm_zero_pte(*pte)))
   458			mss->ksm += size;
   459	
   460		mss->resident += size;
   461		/* Accumulate the size in pages that have been accessed. */
   462		if (young || page_is_young(page) || PageReferenced(page))
   463			mss->referenced += size;
   464	
   465		/*
   466		 * Then accumulate quantities that may depend on sharing, or that may
   467		 * differ page-by-page.
   468		 *
   469		 * page_count(page) == 1 guarantees the page is mapped exactly once.
   470		 * If any subpage of the compound page mapped with PTE it would elevate
   471		 * page_count().
   472		 *
   473		 * The page_mapcount() is called to get a snapshot of the mapcount.
   474		 * Without holding the page lock this snapshot can be slightly wrong as
   475		 * we cannot always read the mapcount atomically. It is not safe to
   476		 * call page_mapcount() even with PTL held if the page is not mapped,
   477		 * especially for migration entries. Treat regular migration entries
   478		 * as mapcount == 1.
   479		 */
   480		if ((page_count(page) == 1) || migration) {
   481			smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
   482					      locked, true);
   483			return;
   484		}
   485		for (i = 0; i < nr; i++, page++) {
   486			int mapcount = page_mapcount(page);
   487			unsigned long pss = PAGE_SIZE << PSS_SHIFT;
   488			if (mapcount >= 2)
   489				pss /= mapcount;
   490			smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
   491					      mapcount < 2);
   492		}
   493	}
   494	

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
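
powerpc-allnoconfig builds with CONFIG_KSM disabled, so the likely cause is that
is_ksm_zero_pte() is only visible from <linux/ksm.h> when CONFIG_KSM is set and
there is no fallback for !CONFIG_KSM configurations. The snippet below is only a
sketch of one possible shape of such a fallback, not the fix that actually landed;
the exact location in include/linux/ksm.h, the surrounding #ifdef layout, and the
stub's signature are assumptions made for illustration.

	/*
	 * include/linux/ksm.h -- sketch only; layout and placement assumed.
	 *
	 * #ifdef CONFIG_KSM
	 *	... existing declarations, including the real is_ksm_zero_pte() ...
	 */
	#else  /* !CONFIG_KSM */

	/*
	 * With KSM compiled out there are no KSM-placed zero pages, so the
	 * helper can unconditionally report "not a KSM zero pte".  A stub
	 * like this lets callers such as smaps_account() keep compiling on
	 * configurations (e.g. powerpc-allnoconfig) where CONFIG_KSM=n.
	 */
	static inline bool is_ksm_zero_pte(pte_t pte)
	{
		return false;
	}

	#endif /* CONFIG_KSM */

Whether a stub like this or a different guard at the call site is the right fix is
up to the actual patch; the point of the sketch is only that the symbol has to be
declared even for KSM-disabled builds, since -Werror=implicit-function-declaration
turns the missing declaration into the hard build failure reported above.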