Hello Matthew Wilcox (Oracle),

The patch b0bb08b2d5f3: "mm/shmem: turn shmem_alloc_page() into
shmem_alloc_folio()" from Apr 29, 2022, leads to the following Smatch
static checker warning:

	mm/shmem.c:2337 shmem_mfill_atomic_pte()
	error: uninitialized symbol 'page'.

mm/shmem.c
    2303 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
    2304 			   pmd_t *dst_pmd,
    2305 			   struct vm_area_struct *dst_vma,
    2306 			   unsigned long dst_addr,
    2307 			   unsigned long src_addr,
    2308 			   bool zeropage, bool wp_copy,
    2309 			   struct page **pagep)
    2310 {
    2311 	struct inode *inode = file_inode(dst_vma->vm_file);
    2312 	struct shmem_inode_info *info = SHMEM_I(inode);
    2313 	struct address_space *mapping = inode->i_mapping;
    2314 	gfp_t gfp = mapping_gfp_mask(mapping);
    2315 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
    2316 	void *page_kaddr;
    2317 	struct folio *folio;
    2318 	struct page *page;
    2319 	int ret;
    2320 	pgoff_t max_off;
    2321 
    2322 	if (!shmem_inode_acct_block(inode, 1)) {
    2323 		/*
    2324 		 * We may have got a page, returned -ENOENT triggering a retry,
    2325 		 * and now we find ourselves with -ENOMEM. Release the page, to
    2326 		 * avoid a BUG_ON in our caller.
    2327 		 */
    2328 		if (unlikely(*pagep)) {
    2329 			put_page(*pagep);
    2330 			*pagep = NULL;
    2331 		}
    2332 		return -ENOMEM;
    2333 	}
    2334 
    2335 	if (!*pagep) {
    2336 		ret = -ENOMEM;

There used to be an allocation here:

	page = shmem_alloc_page(gfp, info, pgoff);

but the shmem_alloc_page() function is gone, so "page" is never
initialized before the check on the next line.

--> 2337 		if (!page)
    2338 			goto out_unacct_blocks;
    2339 
    2340 		if (!zeropage) {	/* COPY */
    2341 			page_kaddr = kmap_atomic(page);
    2342 			ret = copy_from_user(page_kaddr,
    2343 					     (const void __user *)src_addr,
    2344 					     PAGE_SIZE);
    2345 			kunmap_atomic(page_kaddr);
    2346 
    2347 			/* fallback to copy_from_user outside mmap_lock */
    2348 			if (unlikely(ret)) {
    2349 				*pagep = page;
    2350 				ret = -ENOENT;
    2351 				/* don't free the page */
    2352 				goto out_unacct_blocks;
    2353 			}
    2354 
    2355 			flush_dcache_page(page);
    2356 		} else {		/* ZEROPAGE */
    2357 			clear_user_highpage(page, dst_addr);
    2358 		}
    2359 	} else {
    2360 		page = *pagep;
    2361 		*pagep = NULL;
    2362 	}
    2363 
    2364 	VM_BUG_ON(PageLocked(page));
    2365 	VM_BUG_ON(PageSwapBacked(page));
    2366 	__SetPageLocked(page);
    2367 	__SetPageSwapBacked(page);
    2368 	__SetPageUptodate(page);
    2369 
    2370 	ret = -EFAULT;
    2371 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
    2372 	if (unlikely(pgoff >= max_off))
    2373 		goto out_release;
    2374 
    2375 	folio = page_folio(page);
    2376 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
    2377 				      gfp & GFP_RECLAIM_MASK, dst_mm);
    2378 	if (ret)
    2379 		goto out_release;
    2380 
    2381 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
    2382 				       page, true, wp_copy);
    2383 	if (ret)
    2384 		goto out_delete_from_cache;
    2385 
    2386 	spin_lock_irq(&info->lock);
    2387 	info->alloced++;
    2388 	inode->i_blocks += BLOCKS_PER_PAGE;
    2389 	shmem_recalc_inode(inode);
    2390 	spin_unlock_irq(&info->lock);
    2391 
    2392 	unlock_page(page);
    2393 	return 0;
    2394 out_delete_from_cache:
    2395 	delete_from_page_cache(page);
    2396 out_release:
    2397 	unlock_page(page);
    2398 	put_page(page);
    2399 out_unacct_blocks:
    2400 	shmem_inode_unacct_blocks(inode, 1);
    2401 	return ret;
    2402 }
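Presumably the intent was to allocate a folio with the new helper and
then work with its head page.  Something like the following untested
sketch, assuming shmem_alloc_folio() from the same series takes the
same (gfp, info, pgoff) arguments as the old shmem_alloc_page() did:

		/* the old shmem_alloc_page(gfp, info, pgoff) call went here */
		folio = shmem_alloc_folio(gfp, info, pgoff);
		if (!folio)
			goto out_unacct_blocks;
		/* rest of the function still operates on the page */
		page = &folio->page;

That would keep the rest of the function page based for now, but I
will leave the actual fix to you.

regards,
dan carpenter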