Hi Baolin,

kernel test robot noticed the following build warnings:

[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Baolin-Wang/mm-memory-extend-finish_fault-to-support-large-folio/20240530-100805
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/ec35a23026dd016705d211e85163cabe07681516.1717033868.git.baolin.wang%40linux.alibaba.com
patch subject: [PATCH v3 4/6] mm: shmem: add mTHP support for anonymous shmem
config: powerpc64-randconfig-r071-20240531 (https://download.01.org/0day-ci/archive/20240602/202406020203.14sT311e-lkp@xxxxxxxxx/config)
compiler: clang version 19.0.0git (https://github.com/llvm/llvm-project bafda89a0944d947fc4b3b5663185e07a397ac30)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Reported-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
| Closes: https://lore.kernel.org/r/202406020203.14sT311e-lkp@xxxxxxxxx/

smatch warnings:
mm/shmem.c:1766 shmem_alloc_and_add_folio() error: uninitialized symbol 'suitable_orders'.

vim +/suitable_orders +1766 mm/shmem.c

ededbc2c2f28a1 Baolin Wang             2024-05-30  1729  static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
ededbc2c2f28a1 Baolin Wang             2024-05-30  1730  		gfp_t gfp, struct inode *inode, pgoff_t index,
ededbc2c2f28a1 Baolin Wang             2024-05-30  1731  		struct mm_struct *fault_mm, unsigned long orders)
800d8c63b2e989 Kirill A. Shutemov      2016-07-26  1732  {
3022fd7af9604d Hugh Dickins            2023-09-29  1733  	struct address_space *mapping = inode->i_mapping;
0f0796945614b7 Mike Rapoport           2017-09-06  1734  	struct shmem_inode_info *info = SHMEM_I(inode);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1735  	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1736  	unsigned long suitable_orders;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1737  	struct folio *folio = NULL;
3022fd7af9604d Hugh Dickins            2023-09-29  1738  	long pages;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1739  	int error, order;
800d8c63b2e989 Kirill A. Shutemov      2016-07-26  1740  
396bcc5299c281 Matthew Wilcox (Oracle  2020-04-06  1741) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
ededbc2c2f28a1 Baolin Wang             2024-05-30  1742  		orders = 0;
800d8c63b2e989 Kirill A. Shutemov      2016-07-26  1743  
ededbc2c2f28a1 Baolin Wang             2024-05-30  1744  	if (orders > 0) {
ededbc2c2f28a1 Baolin Wang             2024-05-30  1745  		if (vma && vma_is_anon_shmem(vma)) {
ededbc2c2f28a1 Baolin Wang             2024-05-30  1746  			suitable_orders = anon_shmem_suitable_orders(inode, vmf,
ededbc2c2f28a1 Baolin Wang             2024-05-30  1747  							mapping, index, orders);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1748  		} else if (orders & BIT(HPAGE_PMD_ORDER)) {
3022fd7af9604d Hugh Dickins            2023-09-29  1749  			pages = HPAGE_PMD_NR;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1750  			suitable_orders = BIT(HPAGE_PMD_ORDER);
3022fd7af9604d Hugh Dickins            2023-09-29  1751  			index = round_down(index, HPAGE_PMD_NR);
3022fd7af9604d Hugh Dickins            2023-09-29  1752  
3022fd7af9604d Hugh Dickins            2023-09-29  1753  			/*
3022fd7af9604d Hugh Dickins            2023-09-29  1754  			 * Check for conflict before waiting on a huge allocation.
3022fd7af9604d Hugh Dickins            2023-09-29  1755  			 * Conflict might be that a huge page has just been allocated
3022fd7af9604d Hugh Dickins            2023-09-29  1756  			 * and added to page cache by a racing thread, or that there
3022fd7af9604d Hugh Dickins            2023-09-29  1757  			 * is already at least one small page in the huge extent.
3022fd7af9604d Hugh Dickins            2023-09-29  1758  			 * Be careful to retry when appropriate, but not forever!
3022fd7af9604d Hugh Dickins            2023-09-29  1759  			 * Elsewhere -EEXIST would be the right code, but not here.
3022fd7af9604d Hugh Dickins            2023-09-29  1760  			 */
3022fd7af9604d Hugh Dickins            2023-09-29  1761  			if (xa_find(&mapping->i_pages, &index,
3022fd7af9604d Hugh Dickins            2023-09-29  1762  					index + HPAGE_PMD_NR - 1, XA_PRESENT))
3022fd7af9604d Hugh Dickins            2023-09-29  1763  				return ERR_PTR(-E2BIG);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1764  		}

'suitable_orders' is uninitialized on the else path here (when neither branch
above assigns it: the VMA is not anonymous shmem and 'orders' does not include
HPAGE_PMD_ORDER), but it is read unconditionally at line 1766 below; one
possible minimal fix is sketched at the end of this mail.

52cd3b074050dd Lee Schermerhorn        2008-04-28  1765  
ededbc2c2f28a1 Baolin Wang             2024-05-30 @1766  		order = highest_order(suitable_orders);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1767  		while (suitable_orders) {
ededbc2c2f28a1 Baolin Wang             2024-05-30  1768  			pages = 1UL << order;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1769  			index = round_down(index, pages);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1770  			folio = shmem_alloc_folio(gfp, order, info, index);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1771  			if (folio)
ededbc2c2f28a1 Baolin Wang             2024-05-30  1772  				goto allocated;
ededbc2c2f28a1 Baolin Wang             2024-05-30  1773  
ededbc2c2f28a1 Baolin Wang             2024-05-30  1774  			if (pages == HPAGE_PMD_NR)
3022fd7af9604d Hugh Dickins            2023-09-29  1775  				count_vm_event(THP_FILE_FALLBACK);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1776  			order = next_order(&suitable_orders, order);
ededbc2c2f28a1 Baolin Wang             2024-05-30  1777  		}
3022fd7af9604d Hugh Dickins            2023-09-29  1778  	} else {
3022fd7af9604d Hugh Dickins            2023-09-29  1779  		pages = 1;

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
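The sketch referred to above: a minimal, untested way to make the flagged path
well-defined would be to initialize 'suitable_orders' to 0 at its declaration,
so that when neither branch assigns it the order-selection loop at line 1767
never runs. This is only an illustration of the uninitialized path, not
necessarily the fix Baolin intends; it assumes that skipping the large-folio
attempt entirely is acceptable for a non-anon-shmem fault whose 'orders' mask
lacks HPAGE_PMD_ORDER (an explicit else branch choosing a fallback order would
be another option).

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1733,7 +1733,7 @@
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
-	unsigned long suitable_orders;
+	unsigned long suitable_orders = 0;
 	struct folio *folio = NULL;
 	long pages;
 	int error, order;

With that, highest_order() at line 1766 reads a defined value and the
while (suitable_orders) loop body is simply skipped on the path smatch
complains about.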