Hi Frank,

FYI, the error/warning was bisected to this commit; please ignore it if it's irrelevant.

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable
head:   495206a68b359eb6117d0860861578113bbb94e7
commit: 383ad37bda9ba2414ce6abff61fecdc09afc623a [77/88] mm/hugetlb: add pre-HVO framework
config: x86_64-buildonly-randconfig-001-20250131 (https://download.01.org/0day-ci/archive/20250131/202501311312.ZTWda9Ip-lkp@xxxxxxxxx/config)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250131/202501311312.ZTWda9Ip-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501311312.ZTWda9Ip-lkp@xxxxxxxxx/

All errors (new ones prefixed by >>):

   mm/hugetlb_vmemmap.c: In function '__hugetlb_vmemmap_optimize_folios':
>> mm/hugetlb_vmemmap.c:704:17: error: label 'out' used but not defined
     704 |                 goto out;
         |                 ^~~~


vim +/out +704 mm/hugetlb_vmemmap.c

   651
   652  static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
   653                                                struct list_head *folio_list,
   654                                                bool boot)
   655  {
   656          struct folio *folio;
   657          int nr_to_optimize;
   658          LIST_HEAD(vmemmap_pages);
   659          unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
   660
   661          nr_to_optimize = 0;
   662          list_for_each_entry(folio, folio_list, lru) {
   663                  int ret;
   664                  unsigned long spfn, epfn;
   665
   666                  if (boot && folio_test_hugetlb_vmemmap_optimized(folio)) {
   667                          /*
   668                           * Already optimized by pre-HVO, just map the
   669                           * mirrored tail page structs RO.
   670                           */
   671                          spfn = (unsigned long)&folio->page;
   672                          epfn = spfn + pages_per_huge_page(h);
   673                          vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio),
   674                                          HUGETLB_VMEMMAP_RESERVE_SIZE);
   675                          register_page_bootmem_memmap(pfn_to_section_nr(spfn),
   676                                          &folio->page,
   677                                          HUGETLB_VMEMMAP_RESERVE_SIZE);
   678                          static_branch_inc(&hugetlb_optimize_vmemmap_key);
   679                          continue;
   680                  }
   681
   682                  nr_to_optimize++;
   683
   684                  ret = hugetlb_vmemmap_split_folio(h, folio);
   685
   686                  /*
   687                   * Spliting the PMD requires allocating a page, thus lets fail
   688                   * early once we encounter the first OOM. No point in retrying
   689                   * as it can be dynamically done on remap with the memory
   690                   * we get back from the vmemmap deduplication.
   691                   */
   692                  if (ret == -ENOMEM)
   693                          break;
   694          }
   695
   696          if (!nr_to_optimize)
   697                  /*
   698                   * All pre-HVO folios, nothing left to do. It's ok if
   699                   * there is a mix of pre-HVO and not yet HVO-ed folios
   700                   * here, as __hugetlb_vmemmap_optimize_folio() will
   701                   * skip any folios that already have the optimized flag
   702                   * set, see vmemmap_should_optimize_folio().
   703                   */
 > 704                  goto out;
   705
   706          flush_tlb_all();
   707
   708          list_for_each_entry(folio, folio_list, lru) {
   709                  int ret;
   710
   711                  ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
   712                  /* only need to synchronize_rcu() once for each batch */
   713                  flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
   714
   715                  /*
   716                   * Pages to be freed may have been accumulated. If we
   717                   * encounter an ENOMEM, free what we have and try again.
   718                   * This can occur in the case that both spliting fails
   719                   * halfway and head page allocation also failed. In this
   720                   * case __hugetlb_vmemmap_optimize_folio() would free memory
   721                   * allowing more vmemmap remaps to occur.
   722                   */
   723                  if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) {
   724                          flush_tlb_all();
   725                          free_vmemmap_page_list(&vmemmap_pages);
   726                          INIT_LIST_HEAD(&vmemmap_pages);
   727                          __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
   728                  }
   729          }
   730
   731          flush_tlb_all();
   732          free_vmemmap_page_list(&vmemmap_pages);
   733  }
   734

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
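
As the listing shows, there is no "out:" label anywhere in __hugetlb_vmemmap_optimize_folios(), so the "goto out" at line 704 cannot resolve and the build fails. A minimal sketch of one possible fix follows; it assumes the early exit only needs to skip the remap loop (at that point no folio has been split and vmemmap_pages is still empty), and it is not necessarily the change that ends up folded into the patch:

        /*
         * Sketch of a possible fix (untested): when nr_to_optimize is
         * zero, nothing was split and vmemmap_pages is still empty,
         * so there is nothing to flush or free and the early exit can
         * simply return instead of jumping to a label that does not
         * exist.
         */
        if (!nr_to_optimize)
                return;

An alternative would be to add an "out:" label just before the final flush_tlb_all()/free_vmemmap_page_list() pair; freeing an empty list there is harmless, but the direct return avoids a pointless global TLB flush.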