[bug report] mm/mmap: change do_mas_align_munmap() to avoid preallocations for sidetree

Hello Liam Howlett,

The patch fecd1f7f7502: "mm/mmap: change do_mas_align_munmap() to
avoid preallocations for sidetree" from Jun 17, 2022, leads to the
following Smatch static checker warning:

	mm/mmap.c:2431 do_mas_align_munmap()
	warn: missing error code here? 'munmap_sidetree()' failed. 'error' = '0'

mm/mmap.c
    2363 static int
    2364 do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
    2365                     struct mm_struct *mm, unsigned long start,
    2366                     unsigned long end, struct list_head *uf, bool downgrade)
    2367 {
    2368         struct vm_area_struct *prev, *next = NULL;
    2369         struct maple_tree mt_detach;
    2370         int count = 0;
    2371         int error = -ENOMEM;
    2372         MA_STATE(mas_detach, &mt_detach, 0, 0);
    2373         mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
    2374         mt_set_external_lock(&mt_detach, &mm->mmap_lock);
    2375 
    2376         if (mas_preallocate(mas, vma, GFP_KERNEL))
    2377                 return -ENOMEM;
    2378 
    2379         mas->last = end - 1;
    2380         /*
    2381          * If we need to split any vma, do it now to save pain later.
    2382          *
    2383          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
    2384          * unmapped vm_area_struct will remain in use: so lower split_vma
    2385          * places tmp vma above, and higher split_vma places tmp vma below.
    2386          */
    2387 
    2388         /* Does it split the first one? */
    2389         if (start > vma->vm_start) {
    2390 
    2391                 /*
    2392                  * Make sure that map_count on return from munmap() will
    2393                  * not exceed its limit; but let map_count go just above
    2394                  * its limit temporarily, to help free resources as expected.
    2395                  */
    2396                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
    2397                         goto map_count_exceeded;
    2398 
    2399                 /*
    2400                  * mas_pause() is not needed since mas->index needs to be set
    2401                  * differently than vma->vm_end anyways.
    2402                  */
    2403                 error = __split_vma(mm, vma, start, 0);
    2404                 if (error)
    2405                         goto start_split_failed;
    2406 
    2407                 mas_set(mas, start);
    2408                 vma = mas_walk(mas);
    2409         }
    2410 
    2411         prev = mas_prev(mas, 0);
    2412         if (unlikely((!prev)))
    2413                 mas_set(mas, start);
    2414 
    2415         /*
    2416          * Detach a range of VMAs from the mm. Using next as a temp variable as
    2417          * it is always overwritten.
    2418          */
    2419         mas_for_each(mas, next, end - 1) {
    2420                 /* Does it split the end? */
    2421                 if (next->vm_end > end) {
    2422                         struct vm_area_struct *split;
    2423 
    2424                         error = __split_vma(mm, next, end, 1);
    2425                         if (error)
    2426                                 goto end_split_failed;
    2427 
    2428                         mas_set(mas, end);
    2429                         split = mas_prev(mas, 0);
    2430                         if (munmap_sidetree(split, &mas_detach))
--> 2431                                 goto munmap_sidetree_failed;

Need "error = -ENOMEM;"

    2432 
    2433                         count++;
    2434                         if (vma == next)
    2435                                 vma = split;
    2436                         break;
    2437                 }
    2438                 if (munmap_sidetree(next, &mas_detach))
    2439                         goto munmap_sidetree_failed;

Here too: "error" can still be zero at this point if the earlier
__split_vma() call succeeded.
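
Alternatively, if munmap_sidetree() returns a negative errno on
failure (it looks like it returns -ENOMEM), both call sites could
propagate the return value instead of hard coding it:

	error = munmap_sidetree(next, &mas_detach);
	if (error)
		goto munmap_sidetree_failed;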

    2440 
    2441                 count++;
    2442 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
    2443                 BUG_ON(next->vm_start < start);
    2444                 BUG_ON(next->vm_start > end);
    2445 #endif
    2446         }
    2447 
    2448         if (!next)
    2449                 next = mas_next(mas, ULONG_MAX);
    2450 
    2451         if (unlikely(uf)) {
    2452                 /*
    2453                  * If userfaultfd_unmap_prep returns an error the vmas
    2454                  * will remain split, but userland will get a
    2455                  * highly unexpected error anyway. This is no
    2456                  * different than the case where the first of the two
    2457                  * __split_vma fails, but we don't undo the first
    2458                  * split, despite we could. This is unlikely enough
    2459                  * failure that it's not worth optimizing it for.
    2460                  */
    2461                 error = userfaultfd_unmap_prep(mm, start, end, uf);
    2462 
    2463                 if (error)
    2464                         goto userfaultfd_error;
    2465         }
    2466 
    2467         /* Point of no return */
    2468         mas_set_range(mas, start, end - 1);
    2469 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
    2470         /* Make sure no VMAs are about to be lost. */
    2471         {
    2472                 MA_STATE(test, &mt_detach, start, end - 1);
    2473                 struct vm_area_struct *vma_mas, *vma_test;
    2474                 int test_count = 0;
    2475 
    2476                 rcu_read_lock();
    2477                 vma_test = mas_find(&test, end - 1);
    2478                 mas_for_each(mas, vma_mas, end - 1) {
    2479                         BUG_ON(vma_mas != vma_test);
    2480                         test_count++;
    2481                         vma_test = mas_next(&test, end - 1);
    2482                 }
    2483                 rcu_read_unlock();
    2484                 BUG_ON(count != test_count);
    2485                 mas_set_range(mas, start, end - 1);
    2486         }
    2487 #endif
    2488         mas_store_prealloc(mas, NULL);
    2489         mm->map_count -= count;
    2490         /*
    2491          * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
    2492          * VM_GROWSUP VMA. Such VMAs can change their size under
    2493          * down_read(mmap_lock) and collide with the VMA we are about to unmap.
    2494          */
    2495         if (downgrade) {
    2496                 if (next && (next->vm_flags & VM_GROWSDOWN))
    2497                         downgrade = false;
    2498                 else if (prev && (prev->vm_flags & VM_GROWSUP))
    2499                         downgrade = false;
    2500                 else
    2501                         mmap_write_downgrade(mm);
    2502         }
    2503 
    2504         unmap_region(mm, &mt_detach, vma, prev, next, start, end);
    2505         /* Statistics and freeing VMAs */
    2506         mas_set(&mas_detach, start);
    2507         remove_mt(mm, &mas_detach);
    2508         __mt_destroy(&mt_detach);
    2509 
    2510 
    2511         validate_mm(mm);
    2512         return downgrade ? 1 : 0;
    2513 
    2514 userfaultfd_error:
    2515 munmap_sidetree_failed:
    2516 end_split_failed:
    2517         __mt_destroy(&mt_detach);
    2518 start_split_failed:
    2519 map_count_exceeded:
    2520         mas_destroy(mas);
    2521         return error;
    2522 }

regards,
dan carpenter