Hi Guoqing,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on song-md/md-next]
[also build test ERROR on v5.14-rc6 next-20210818]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Guoqing-Jiang/raid1-ensure-write-behind-bio-has-less-than-BIO_MAX_VECS-sectors/20210818-154106
base:   git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git md-next
config: x86_64-randconfig-a011-20210816 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project d2b574a4dea5b718e4386bf2e26af0126e5978ce)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/abf22557456363eb6fd1d1d09332f5261d61796c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Guoqing-Jiang/raid1-ensure-write-behind-bio-has-less-than-BIO_MAX_VECS-sectors/20210818-154106
        git checkout abf22557456363eb6fd1d1d09332f5261d61796c
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross O=build_dir ARCH=x86_64 SHELL=/bin/bash drivers/md/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

>> drivers/md/raid1.c:1388:30: error: use of undeclared identifier 'mirror'
                   if (test_bit(WriteMostly, &mirror->rdev->flags))
                                              ^
   drivers/md/raid1.c:1471:56: error: use of undeclared identifier 'PAGE_SECTORS'
                   max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
                                                                         ^
   drivers/md/raid1.c:1471:56: error: use of undeclared identifier 'PAGE_SECTORS'
   3 errors generated.

vim +/mirror +1388 drivers/md/raid1.c

  1320
  1321  static void raid1_write_request(struct mddev *mddev, struct bio *bio,
  1322                                  int max_write_sectors)
  1323  {
  1324          struct r1conf *conf = mddev->private;
  1325          struct r1bio *r1_bio;
  1326          int i, disks;
  1327          struct bitmap *bitmap = mddev->bitmap;
  1328          unsigned long flags;
  1329          struct md_rdev *blocked_rdev;
  1330          struct blk_plug_cb *cb;
  1331          struct raid1_plug_cb *plug = NULL;
  1332          int first_clone;
  1333          int max_sectors;
  1334          bool write_behind = false;
  1335
  1336          if (mddev_is_clustered(mddev) &&
  1337               md_cluster_ops->area_resyncing(mddev, WRITE,
  1338                       bio->bi_iter.bi_sector, bio_end_sector(bio))) {
  1339
  1340                  DEFINE_WAIT(w);
  1341                  for (;;) {
  1342                          prepare_to_wait(&conf->wait_barrier,
  1343                                          &w, TASK_IDLE);
  1344                          if (!md_cluster_ops->area_resyncing(mddev, WRITE,
  1345                                                              bio->bi_iter.bi_sector,
  1346                                                              bio_end_sector(bio)))
  1347                                  break;
  1348                          schedule();
  1349                  }
  1350                  finish_wait(&conf->wait_barrier, &w);
  1351          }
  1352
  1353          /*
  1354           * Register the new request and wait if the reconstruction
  1355           * thread has put up a bar for new requests.
  1356           * Continue immediately if no resync is active currently.
  1357           */
  1358          wait_barrier(conf, bio->bi_iter.bi_sector);
  1359
  1360          r1_bio = alloc_r1bio(mddev, bio);
  1361          r1_bio->sectors = max_write_sectors;
  1362
  1363          if (conf->pending_count >= max_queued_requests) {
  1364                  md_wakeup_thread(mddev->thread);
  1365                  raid1_log(mddev, "wait queued");
  1366                  wait_event(conf->wait_barrier,
  1367                             conf->pending_count < max_queued_requests);
  1368          }
  1369          /* first select target devices under rcu_lock and
  1370           * inc refcount on their rdev. Record them by setting
  1371           * bios[x] to bio
  1372           * If there are known/acknowledged bad blocks on any device on
  1373           * which we have seen a write error, we want to avoid writing those
  1374           * blocks.
  1375           * This potentially requires several writes to write around
  1376           * the bad blocks. Each set of writes gets it's own r1bio
  1377           * with a set of bios attached.
  1378           */
  1379
  1380          disks = conf->raid_disks * 2;
  1381   retry_write:
  1382          blocked_rdev = NULL;
  1383          rcu_read_lock();
  1384          max_sectors = r1_bio->sectors;
  1385          for (i = 0; i < disks; i++) {
  1386                  struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1387
> 1388                  if (test_bit(WriteMostly, &mirror->rdev->flags))
  1389                          write_behind = true;
  1390
  1391                  if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1392                          atomic_inc(&rdev->nr_pending);
  1393                          blocked_rdev = rdev;
  1394                          break;
  1395                  }
  1396                  r1_bio->bios[i] = NULL;
  1397                  if (!rdev || test_bit(Faulty, &rdev->flags)) {
  1398                          if (i < conf->raid_disks)
  1399                                  set_bit(R1BIO_Degraded, &r1_bio->state);
  1400                          continue;
  1401                  }
  1402
  1403                  atomic_inc(&rdev->nr_pending);
  1404                  if (test_bit(WriteErrorSeen, &rdev->flags)) {
  1405                          sector_t first_bad;
  1406                          int bad_sectors;
  1407                          int is_bad;
  1408
  1409                          is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
  1410                                               &first_bad, &bad_sectors);
  1411                          if (is_bad < 0) {
  1412                                  /* mustn't write here until the bad block is
  1413                                   * acknowledged*/
  1414                                  set_bit(BlockedBadBlocks, &rdev->flags);
  1415                                  blocked_rdev = rdev;
  1416                                  break;
  1417                          }
  1418                          if (is_bad && first_bad <= r1_bio->sector) {
  1419                                  /* Cannot write here at all */
  1420                                  bad_sectors -= (r1_bio->sector - first_bad);
  1421                                  if (bad_sectors < max_sectors)
  1422                                          /* mustn't write more than bad_sectors
  1423                                           * to other devices yet
  1424                                           */
  1425                                          max_sectors = bad_sectors;
  1426                                  rdev_dec_pending(rdev, mddev);
  1427                                  /* We don't set R1BIO_Degraded as that
  1428                                   * only applies if the disk is
  1429                                   * missing, so it might be re-added,
  1430                                   * and we want to know to recover this
  1431                                   * chunk.
  1432                                   * In this case the device is here,
  1433                                   * and the fact that this chunk is not
  1434                                   * in-sync is recorded in the bad
  1435                                   * block log
  1436                                   */
  1437                                  continue;
  1438                          }
  1439                          if (is_bad) {
  1440                                  int good_sectors = first_bad - r1_bio->sector;
  1441                                  if (good_sectors < max_sectors)
  1442                                          max_sectors = good_sectors;
  1443                          }
  1444                  }
  1445                  r1_bio->bios[i] = bio;
  1446          }
  1447          rcu_read_unlock();
  1448
  1449          if (unlikely(blocked_rdev)) {
  1450                  /* Wait for this device to become unblocked */
  1451                  int j;
  1452
  1453                  for (j = 0; j < i; j++)
  1454                          if (r1_bio->bios[j])
  1455                                  rdev_dec_pending(conf->mirrors[j].rdev, mddev);
  1456                  r1_bio->state = 0;
  1457                  allow_barrier(conf, bio->bi_iter.bi_sector);
  1458                  raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
  1459                  md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1460                  wait_barrier(conf, bio->bi_iter.bi_sector);
  1461                  goto retry_write;
  1462          }
  1463
  1464          /*
  1465           * When using a bitmap, we may call alloc_behind_master_bio below.
  1466           * alloc_behind_master_bio allocates a copy of the data payload a page
  1467           * at a time and thus needs a new bio that can fit the whole payload
  1468           * this bio in page sized chunks.
  1469           */
  1470          if (write_behind && bitmap)
  1471                  max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * PAGE_SECTORS);
  1472          if (max_sectors < bio_sectors(bio)) {
  1473                  struct bio *split = bio_split(bio, max_sectors,
  1474                                                GFP_NOIO, &conf->bio_split);
  1475                  bio_chain(split, bio);
  1476                  submit_bio_noacct(bio);
  1477                  bio = split;
  1478                  r1_bio->master_bio = bio;
  1479                  r1_bio->sectors = max_sectors;
  1480          }
  1481
  1482          if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
  1483                  r1_bio->start_time = bio_start_io_acct(bio);
  1484          atomic_set(&r1_bio->remaining, 1);
  1485          atomic_set(&r1_bio->behind_remaining, 0);
  1486
  1487          first_clone = 1;
  1488
  1489          for (i = 0; i < disks; i++) {
  1490                  struct bio *mbio = NULL;
  1491                  struct md_rdev *rdev = conf->mirrors[i].rdev;
  1492                  if (!r1_bio->bios[i])
  1493                          continue;
  1494
  1495                  if (first_clone) {
  1496                          /* do behind I/O ?
  1497                           * Not if there are too many, or cannot
  1498                           * allocate memory, or a reader on WriteMostly
  1499                           * is waiting for behind writes to flush */
  1500                          if (bitmap &&
  1501                              (atomic_read(&bitmap->behind_writes)
  1502                               < mddev->bitmap_info.max_write_behind) &&
  1503                              !waitqueue_active(&bitmap->behind_wait)) {
  1504                                  alloc_behind_master_bio(r1_bio, bio);
  1505                          }
  1506
  1507                          md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
  1508                                               test_bit(R1BIO_BehindIO, &r1_bio->state));
  1509                          first_clone = 0;
  1510                  }
  1511
  1512                  if (r1_bio->behind_master_bio)
  1513                          mbio = bio_clone_fast(r1_bio->behind_master_bio,
  1514                                                GFP_NOIO, &mddev->bio_set);
  1515                  else
  1516                          mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
  1517
  1518                  if (r1_bio->behind_master_bio) {
  1519                          if (test_bit(CollisionCheck, &rdev->flags))
  1520                                  wait_for_serialization(rdev, r1_bio);
  1521                          if (test_bit(WriteMostly, &rdev->flags))
  1522                                  atomic_inc(&r1_bio->behind_remaining);
  1523                  } else if (mddev->serialize_policy)
  1524                          wait_for_serialization(rdev, r1_bio);
  1525
  1526                  r1_bio->bios[i] = mbio;
  1527
  1528                  mbio->bi_iter.bi_sector = (r1_bio->sector +
  1529                                     conf->mirrors[i].rdev->data_offset);
  1530                  bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
  1531                  mbio->bi_end_io = raid1_end_write_request;
  1532                  mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
  1533                  if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
  1534                      !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
  1535                      conf->raid_disks - mddev->degraded > 1)
  1536                          mbio->bi_opf |= MD_FAILFAST;
  1537                  mbio->bi_private = r1_bio;
  1538
  1539                  atomic_inc(&r1_bio->remaining);
  1540
  1541                  if (mddev->gendisk)
  1542                          trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
  1543                                                r1_bio->sector);
  1544                  /* flush_pending_writes() needs access to the rdev so...*/
  1545                  mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
  1546
  1547                  cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
  1548                  if (cb)
  1549                          plug = container_of(cb, struct raid1_plug_cb, cb);
  1550                  else
  1551                          plug = NULL;
  1552                  if (plug) {
  1553                          bio_list_add(&plug->pending, mbio);
  1554                          plug->pending_cnt++;
  1555                  } else {
  1556                          spin_lock_irqsave(&conf->device_lock, flags);
  1557                          bio_list_add(&conf->pending_bio_list, mbio);
  1558                          conf->pending_count++;
  1559                          spin_unlock_irqrestore(&conf->device_lock, flags);
  1560                          md_wakeup_thread(mddev->thread);
  1561                  }
  1562          }
  1563
  1564          r1_bio_write_done(r1_bio);
  1565
  1566          /* In case raid1d snuck in to freeze_array */
  1567          wake_up(&conf->wait_barrier);
  1568  }
  1569

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz
Description: application/gzip
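
For reference, below is a minimal sketch of one way the two undeclared identifiers could be resolved, based only on the context quoted above: reuse the rdev already dereferenced at the top of the per-device loop instead of 'mirror', and open-code the pages-to-sectors conversion instead of referencing PAGE_SECTORS, which is not declared in drivers/md/raid1.c for this build. The added NULL check on rdev and the (PAGE_SIZE >> 9) conversion are assumptions of this sketch, not necessarily the author's intended fix:

        /* In the per-device loop: test the rdev dereferenced just above
         * rather than the undeclared 'mirror', and skip missing devices. */
        for (i = 0; i < disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);

                if (rdev && test_bit(WriteMostly, &rdev->flags))
                        write_behind = true;
                /* ... remainder of the loop body unchanged ... */
        }

        /* Before splitting the bio: convert BIO_MAX_VECS pages to 512-byte
         * sectors with PAGE_SIZE >> 9 instead of the undeclared PAGE_SECTORS. */
        if (write_behind && bitmap)
                max_sectors = min_t(int, max_sectors,
                                    BIO_MAX_VECS * (PAGE_SIZE >> 9));

Including or defining a PAGE_SECTORS-style constant would work equally well if one is visible to this file; the open-coded shift merely avoids that extra dependency.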