tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   d67bc7812221606e1886620a357b13f906814af7
commit: f16acaf328c5615fdaea74f9bd0b4019544532d6 [10034/14669] md/raid5: resize stripe_head when reshape array
config: arc-randconfig-m031-20201011 (attached as .config)
compiler: arceb-elf-gcc (GCC) 9.3.0

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@xxxxxxxxx>

New smatch warnings:
drivers/md/raid5.c:2539 resize_stripes() warn: inconsistent indenting

Old smatch warnings:
arch/arc/include/asm/thread_info.h:65 current_thread_info() error: uninitialized symbol 'sp'.
drivers/md/raid5.c:1615 ops_run_compute6_1() error: uninitialized symbol 'target'.
drivers/md/raid5.c:2840 raid5_end_write_request() error: uninitialized symbol 'rdev'.
drivers/md/raid5.c:2845 raid5_end_write_request() error: uninitialized symbol 'rdev'.
drivers/md/raid5.c:7170 setup_conf() warn: should 'mddev->new_chunk_sectors << 9' be a 64 bit type?
drivers/md/raid5.c:8168 raid5_start_reshape() warn: statement has no effect 31
drivers/md/raid5.c:8244 raid5_start_reshape() warn: statement has no effect 31

vim +2539 drivers/md/raid5.c

  2459
  2460  static int resize_stripes(struct r5conf *conf, int newsize)
  2461  {
  2462          /* Make all the stripes able to hold 'newsize' devices.
  2463           * New slots in each stripe get 'page' set to a new page.
  2464           *
  2465           * This happens in stages:
  2466           *  1/ create a new kmem_cache and allocate the required number of
  2467           *     stripe_heads.
  2468           *  2/ gather all the old stripe_heads and transfer the pages across
  2469           *     to the new stripe_heads.  This will have the side effect of
  2470           *     freezing the array as once all stripe_heads have been collected,
  2471           *     no IO will be possible.  Old stripe heads are freed once their
  2472           *     pages have been transferred over, and the old kmem_cache is
  2473           *     freed when all stripes are done.
  2474           *  3/ reallocate conf->disks to be suitable bigger.  If this fails,
  2475           *     we simple return a failure status - no need to clean anything up.
  2476           *  4/ allocate new pages for the new slots in the new stripe_heads.
  2477           *     If this fails, we don't bother trying the shrink the
  2478           *     stripe_heads down again, we just leave them as they are.
  2479           *     As each stripe_head is processed the new one is released into
  2480           *     active service.
  2481           *
  2482           * Once step2 is started, we cannot afford to wait for a write,
  2483           * so we use GFP_NOIO allocations.
  2484           */
  2485          struct stripe_head *osh, *nsh;
  2486          LIST_HEAD(newstripes);
  2487          struct disk_info *ndisks;
  2488          int err = 0;
  2489          struct kmem_cache *sc;
  2490          int i;
  2491          int hash, cnt;
  2492
  2493          md_allow_write(conf->mddev);
  2494
  2495          /* Step 1 */
  2496          sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
  2497                                 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
  2498                                 0, 0, NULL);
  2499          if (!sc)
  2500                  return -ENOMEM;
  2501
  2502          /* Need to ensure auto-resizing doesn't interfere */
  2503          mutex_lock(&conf->cache_size_mutex);
  2504
  2505          for (i = conf->max_nr_stripes; i; i--) {
  2506                  nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf);
  2507                  if (!nsh)
  2508                          break;
  2509
  2510                  list_add(&nsh->lru, &newstripes);
  2511          }
  2512          if (i) {
  2513                  /* didn't get enough, give up */
  2514                  while (!list_empty(&newstripes)) {
  2515                          nsh = list_entry(newstripes.next, struct stripe_head, lru);
  2516                          list_del(&nsh->lru);
  2517                          free_stripe(sc, nsh);
  2518                  }
  2519                  kmem_cache_destroy(sc);
  2520                  mutex_unlock(&conf->cache_size_mutex);
  2521                  return -ENOMEM;
  2522          }
  2523          /* Step 2 - Must use GFP_NOIO now.
  2524           * OK, we have enough stripes, start collecting inactive
  2525           * stripes and copying them over
  2526           */
  2527          hash = 0;
  2528          cnt = 0;
  2529          list_for_each_entry(nsh, &newstripes, lru) {
  2530                  lock_device_hash_lock(conf, hash);
  2531                  wait_event_cmd(conf->wait_for_stripe,
  2532                                 !list_empty(conf->inactive_list + hash),
  2533                                 unlock_device_hash_lock(conf, hash),
  2534                                 lock_device_hash_lock(conf, hash));
  2535                  osh = get_free_stripe(conf, hash);
  2536                  unlock_device_hash_lock(conf, hash);
  2537
  2538  #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
> 2539          for (i = 0; i < osh->nr_pages; i++) {
  2540                  nsh->pages[i] = osh->pages[i];
  2541                  osh->pages[i] = NULL;
  2542          }
  2543  #endif
  2544                  for(i=0; i<conf->pool_size; i++) {
  2545                          nsh->dev[i].page = osh->dev[i].page;
  2546                          nsh->dev[i].orig_page = osh->dev[i].page;
  2547                          nsh->dev[i].offset = osh->dev[i].offset;
  2548                  }
  2549                  nsh->hash_lock_index = hash;
  2550                  free_stripe(conf->slab_cache, osh);
  2551                  cnt++;
  2552                  if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
  2553                      !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
  2554                          hash++;
  2555                          cnt = 0;
  2556                  }
  2557          }
  2558          kmem_cache_destroy(conf->slab_cache);
  2559
  2560          /* Step 3.
  2561           * At this point, we are holding all the stripes so the array
  2562           * is completely stalled, so now is a good time to resize
  2563           * conf->disks and the scribble region
  2564           */
  2565          ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO);
  2566          if (ndisks) {
  2567                  for (i = 0; i < conf->pool_size; i++)
  2568                          ndisks[i] = conf->disks[i];
  2569
  2570                  for (i = conf->pool_size; i < newsize; i++) {
  2571                          ndisks[i].extra_page = alloc_page(GFP_NOIO);
  2572                          if (!ndisks[i].extra_page)
  2573                                  err = -ENOMEM;
  2574                  }
  2575
  2576                  if (err) {
  2577                          for (i = conf->pool_size; i < newsize; i++)
  2578                                  if (ndisks[i].extra_page)
  2579                                          put_page(ndisks[i].extra_page);
  2580                          kfree(ndisks);
  2581                  } else {
  2582                          kfree(conf->disks);
  2583                          conf->disks = ndisks;
  2584                  }
  2585          } else
  2586                  err = -ENOMEM;
  2587
  2588          mutex_unlock(&conf->cache_size_mutex);
  2589
  2590          conf->slab_cache = sc;
  2591          conf->active_name = 1-conf->active_name;
  2592
  2593          /* Step 4, return new stripes to service */
  2594          while(!list_empty(&newstripes)) {
  2595                  nsh = list_entry(newstripes.next, struct stripe_head, lru);
  2596                  list_del_init(&nsh->lru);
  2597
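The new warning at line 2539 appears to come from the block under "#if PAGE_SIZE != DEFAULT_STRIPE_SIZE" being indented one level shallower than the rest of the list_for_each_entry() body, while the preprocessor lines themselves correctly stay in column 0. Assuming that is the cause, a minimal, untested sketch of a re-indent (not a submitted patch) would simply align the loop with the adjacent conf->pool_size loop:

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
                /* hand the preallocated stripe pages over from the old
                 * stripe_head to the new one; indented to match the rest
                 * of the list_for_each_entry() body
                 */
                for (i = 0; i < osh->nr_pages; i++) {
                        nsh->pages[i] = osh->pages[i];
                        osh->pages[i] = NULL;
                }
#endif

(Tabs in the actual file; spaces are used above only for display.) No functional change is intended, so the generated code should be identical.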
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz (application/gzip)