Hi Guoqing,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on song-md/md-next]
[also build test ERROR on v5.14-rc5 next-20210812]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Guoqing-Jiang/raid1-ensure-bio-doesn-t-have-more-than-BIO_MAX_VECS-sectors/20210813-140810
base:   git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git md-next
config: nds32-randconfig-r035-20210813 (attached as .config)
compiler: nds32le-linux-gcc (GCC) 10.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/29b7720a83de1deea0d8ecfafe0db46146636b15
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Guoqing-Jiang/raid1-ensure-bio-doesn-t-have-more-than-BIO_MAX_VECS-sectors/20210813-140810
        git checkout 29b7720a83de1deea0d8ecfafe0db46146636b15
        # save the attached .config to the linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-10.3.0 make.cross O=build_dir ARCH=nds32 SHELL=/bin/bash drivers/md/

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

   In file included from include/linux/kernel.h:15,
                    from include/asm-generic/bug.h:20,
                    from ./arch/nds32/include/generated/asm/bug.h:1,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/gfp.h:5,
                    from include/linux/slab.h:15,
                    from drivers/md/raid1.c:26:
   drivers/md/raid1.c: In function 'raid1_write_request':
>> drivers/md/raid1.c:1459:55: error: 'PAGE_SECTORS' undeclared (first use in this function); did you mean 'PAGE_MEMORY'?
    1459 |         max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
         |                                                              ^~~~~~~~~~~~
   include/linux/minmax.h:20:39: note: in definition of macro '__typecheck'
      20 |         (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
         |                                               ^
   include/linux/minmax.h:36:24: note: in expansion of macro '__safe_cmp'
      36 |         __builtin_choose_expr(__safe_cmp(x, y), \
         |                               ^~~~~~~~~~
   include/linux/minmax.h:104:27: note: in expansion of macro '__careful_cmp'
     104 | #define min_t(type, x, y)       __careful_cmp((type)(x), (type)(y), <)
         |                                 ^~~~~~~~~~~~~
   drivers/md/raid1.c:1459:16: note: in expansion of macro 'min_t'
    1459 |         max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
         |                       ^~~~~
   drivers/md/raid1.c:1459:55: note: each undeclared identifier is reported only once for each function it appears in
    1459 |         max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
         |                                                              ^~~~~~~~~~~~
   include/linux/minmax.h:20:39: note: in definition of macro '__typecheck'
      20 |         (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
         |                                               ^
   include/linux/minmax.h:36:24: note: in expansion of macro '__safe_cmp'
      36 |         __builtin_choose_expr(__safe_cmp(x, y), \
         |                               ^~~~~~~~~~
   include/linux/minmax.h:104:27: note: in expansion of macro '__careful_cmp'
     104 | #define min_t(type, x, y)       __careful_cmp((type)(x), (type)(y), <)
         |                                 ^~~~~~~~~~~~~
   drivers/md/raid1.c:1459:16: note: in expansion of macro 'min_t'
    1459 |         max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
         |                       ^~~~~
>> include/linux/minmax.h:36:2: error: first argument to '__builtin_choose_expr' not a constant
      36 |         __builtin_choose_expr(__safe_cmp(x, y), \
         |         ^~~~~~~~~~~~~~~~~~~~~
   include/linux/minmax.h:104:27: note: in expansion of macro '__careful_cmp'
     104 | #define min_t(type, x, y)       __careful_cmp((type)(x), (type)(y), <)
         |                                 ^~~~~~~~~~~~~
   drivers/md/raid1.c:1459:16: note: in expansion of macro 'min_t'
    1459 |         max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
         |                       ^~~~~


vim +1459 drivers/md/raid1.c

  1320
  1321  static void raid1_write_request(struct mddev *mddev, struct bio *bio,
  1322                                  int max_write_sectors)
  1323  {
  1324          struct r1conf *conf = mddev->private;
  1325          struct r1bio *r1_bio;
  1326          int i, disks;
  1327          struct bitmap *bitmap = mddev->bitmap;
  1328          unsigned long flags;
  1329          struct md_rdev *blocked_rdev;
  1330          struct blk_plug_cb *cb;
  1331          struct raid1_plug_cb *plug = NULL;
  1332          int first_clone;
  1333          int max_sectors;
  1334
  1335          if (mddev_is_clustered(mddev) &&
  1336               md_cluster_ops->area_resyncing(mddev, WRITE,
  1337                       bio->bi_iter.bi_sector, bio_end_sector(bio))) {
  1338
  1339                  DEFINE_WAIT(w);
  1340                  for (;;) {
  1341                          prepare_to_wait(&conf->wait_barrier,
  1342                                          &w, TASK_IDLE);
  1343                          if (!md_cluster_ops->area_resyncing(mddev, WRITE,
  1344                                                          bio->bi_iter.bi_sector,
  1345                                                          bio_end_sector(bio)))
  1346                                  break;
  1347                          schedule();
  1348                  }
  1349                  finish_wait(&conf->wait_barrier, &w);
  1350          }
  1351
  1352          /*
  1353           * Register the new request and wait if the reconstruction
  1354           * thread has put up a bar for new requests.
  1355           * Continue immediately if no resync is active currently.
  1356           */
  1357          wait_barrier(conf, bio->bi_iter.bi_sector);
  1358
  1359          r1_bio = alloc_r1bio(mddev, bio);
  1360          r1_bio->sectors = max_write_sectors;
  1361
  1362          if (conf->pending_count >= max_queued_requests) {
  1363                  md_wakeup_thread(mddev->thread);
  1364                  raid1_log(mddev, "wait queued");
  1365                  wait_event(conf->wait_barrier,
  1366                             conf->pending_count < max_queued_requests);
  1367          }
  1368          /* first select target devices under rcu_lock and
  1369           * inc refcount on their rdev.  Record them by setting
  1370           * bios[x] to bio
  1371           * If there are known/acknowledged bad blocks on any device on
  1372           * which we have seen a write error, we want to avoid writing those
  1373           * blocks.
  1374           * This potentially requires several writes to write around
  1375           * the bad blocks. Each set of writes gets it's own r1bio
  1376           * with a set of bios attached.
  1377           */
  1378
  1379          disks = conf->raid_disks * 2;
  1380   retry_write:
  1381          blocked_rdev = NULL;
  1382          rcu_read_lock();
  1383          max_sectors = r1_bio->sectors;
  1384          for (i = 0; i < disks; i++) {
  1385                  struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1386                  if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1387                          atomic_inc(&rdev->nr_pending);
  1388                          blocked_rdev = rdev;
  1389                          break;
  1390                  }
  1391                  r1_bio->bios[i] = NULL;
  1392                  if (!rdev || test_bit(Faulty, &rdev->flags)) {
  1393                          if (i < conf->raid_disks)
  1394                                  set_bit(R1BIO_Degraded, &r1_bio->state);
  1395                          continue;
  1396                  }
  1397
  1398                  atomic_inc(&rdev->nr_pending);
  1399                  if (test_bit(WriteErrorSeen, &rdev->flags)) {
  1400                          sector_t first_bad;
  1401                          int bad_sectors;
  1402                          int is_bad;
  1403
  1404                          is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
  1405                                               &first_bad, &bad_sectors);
  1406                          if (is_bad < 0) {
  1407                                  /* mustn't write here until the bad block is
  1408                                   * acknowledged*/
  1409                                  set_bit(BlockedBadBlocks, &rdev->flags);
  1410                                  blocked_rdev = rdev;
  1411                                  break;
  1412                          }
  1413                          if (is_bad && first_bad <= r1_bio->sector) {
  1414                                  /* Cannot write here at all */
  1415                                  bad_sectors -= (r1_bio->sector - first_bad);
  1416                                  if (bad_sectors < max_sectors)
  1417                                          /* mustn't write more than bad_sectors
  1418                                           * to other devices yet
  1419                                           */
  1420                                          max_sectors = bad_sectors;
  1421                                  rdev_dec_pending(rdev, mddev);
  1422                                  /* We don't set R1BIO_Degraded as that
  1423                                   * only applies if the disk is
  1424                                   * missing, so it might be re-added,
  1425                                   * and we want to know to recover this
  1426                                   * chunk.
  1427                                   * In this case the device is here,
  1428                                   * and the fact that this chunk is not
  1429                                   * in-sync is recorded in the bad
  1430                                   * block log
  1431                                   */
  1432                                  continue;
  1433                          }
  1434                          if (is_bad) {
  1435                                  int good_sectors = first_bad - r1_bio->sector;
  1436                                  if (good_sectors < max_sectors)
  1437                                          max_sectors = good_sectors;
  1438                          }
  1439                  }
  1440                  r1_bio->bios[i] = bio;
  1441          }
  1442          rcu_read_unlock();
  1443
  1444          if (unlikely(blocked_rdev)) {
  1445                  /* Wait for this device to become unblocked */
  1446                  int j;
  1447
  1448                  for (j = 0; j < i; j++)
  1449                          if (r1_bio->bios[j])
  1450                                  rdev_dec_pending(conf->mirrors[j].rdev, mddev);
  1451                  r1_bio->state = 0;
  1452                  allow_barrier(conf, bio->bi_iter.bi_sector);
  1453                  raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
  1454                  md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1455                  wait_barrier(conf, bio->bi_iter.bi_sector);
  1456                  goto retry_write;
  1457          }
  1458
> 1459          max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
  1460          if (max_sectors < bio_sectors(bio)) {
  1461                  struct bio *split = bio_split(bio, max_sectors,
  1462                                                GFP_NOIO, &conf->bio_split);
  1463                  bio_chain(split, bio);
  1464                  submit_bio_noacct(bio);
  1465                  bio = split;
  1466                  r1_bio->master_bio = bio;
  1467                  r1_bio->sectors = max_sectors;
  1468          }
  1469
  1470          if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
  1471                  r1_bio->start_time = bio_start_io_acct(bio);
  1472          atomic_set(&r1_bio->remaining, 1);
  1473          atomic_set(&r1_bio->behind_remaining, 0);
  1474
  1475          first_clone = 1;
  1476
  1477          for (i = 0; i < disks; i++) {
  1478                  struct bio *mbio = NULL;
  1479                  struct md_rdev *rdev = conf->mirrors[i].rdev;
  1480                  if (!r1_bio->bios[i])
  1481                          continue;
  1482
  1483                  if (first_clone) {
  1484                          /* do behind I/O ?
  1485                           * Not if there are too many, or cannot
  1486                           * allocate memory, or a reader on WriteMostly
  1487                           * is waiting for behind writes to flush */
  1488                          if (bitmap &&
  1489                              (atomic_read(&bitmap->behind_writes)
  1490                               < mddev->bitmap_info.max_write_behind) &&
  1491                              !waitqueue_active(&bitmap->behind_wait)) {
  1492                                  alloc_behind_master_bio(r1_bio, bio);
  1493                          }
  1494
  1495                          md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
  1496                                               test_bit(R1BIO_BehindIO, &r1_bio->state));
  1497                          first_clone = 0;
  1498                  }
  1499
  1500                  if (r1_bio->behind_master_bio)
  1501                          mbio = bio_clone_fast(r1_bio->behind_master_bio,
  1502                                                GFP_NOIO, &mddev->bio_set);
  1503                  else
  1504                          mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
  1505
  1506                  if (r1_bio->behind_master_bio) {
  1507                          if (test_bit(CollisionCheck, &rdev->flags))
  1508                                  wait_for_serialization(rdev, r1_bio);
  1509                          if (test_bit(WriteMostly, &rdev->flags))
  1510                                  atomic_inc(&r1_bio->behind_remaining);
  1511                  } else if (mddev->serialize_policy)
  1512                          wait_for_serialization(rdev, r1_bio);
  1513
  1514                  r1_bio->bios[i] = mbio;
  1515
  1516                  mbio->bi_iter.bi_sector = (r1_bio->sector +
  1517                                             conf->mirrors[i].rdev->data_offset);
  1518                  bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
  1519                  mbio->bi_end_io = raid1_end_write_request;
  1520                  mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
  1521                  if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
  1522                      !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
  1523                      conf->raid_disks - mddev->degraded > 1)
  1524                          mbio->bi_opf |= MD_FAILFAST;
  1525                  mbio->bi_private = r1_bio;
  1526
  1527                  atomic_inc(&r1_bio->remaining);
  1528
  1529                  if (mddev->gendisk)
  1530                          trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
  1531                                                r1_bio->sector);
  1532                  /* flush_pending_writes() needs access to the rdev so...*/
  1533                  mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
  1534
  1535                  cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
  1536                  if (cb)
  1537                          plug = container_of(cb, struct raid1_plug_cb, cb);
  1538                  else
  1539                          plug = NULL;
  1540                  if (plug) {
  1541                          bio_list_add(&plug->pending, mbio);
  1542                          plug->pending_cnt++;
  1543                  } else {
  1544                          spin_lock_irqsave(&conf->device_lock, flags);
  1545                          bio_list_add(&conf->pending_bio_list, mbio);
  1546                          conf->pending_count++;
  1547                          spin_unlock_irqrestore(&conf->device_lock, flags);
  1548                          md_wakeup_thread(mddev->thread);
  1549                  }
  1550          }
  1551
  1552          r1_bio_write_done(r1_bio);
  1553
  1554          /* In case raid1d snuck in to freeze_array */
  1555          wake_up(&conf->wait_barrier);
  1556  }
  1557
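Editor's note: the root cause is that nothing visible to this nds32
randconfig build declares PAGE_SECTORS, so the new clamp at line 1459 fails
to compile (the second '__builtin_choose_expr' error appears to be fallout
from the same undeclared identifier). One possible fix, sketched below
against the flagged line only and not necessarily the variant that was
ultimately applied, is to derive the sectors-per-page count from PAGE_SIZE
and SECTOR_SHIFT, both of which should already be visible to raid1.c
(SECTOR_SHIFT via <linux/blkdev.h>):

        /*
         * Sketch, assuming no common header exports PAGE_SECTORS on this
         * architecture/config: compute sectors per page explicitly.
         * PAGE_SIZE >> SECTOR_SHIFT is the number of 512-byte sectors in
         * one page, so the product bounds the write at what a bio with
         * BIO_MAX_VECS single-page segments can carry.
         */
        max_sectors = min_t(int, max_sectors,
                            BIO_MAX_VECS * (PAGE_SIZE >> SECTOR_SHIFT));

An equivalent spelling is BIO_MAX_VECS * (PAGE_SIZE >> 9); either form
sidesteps the per-architecture availability of PAGE_SECTORS.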
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

[Attachment: .config.gz, Description: application/gzip]