Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return. The exception is discard, where we end
the bio directly.

Reviewed-by: Yu Kuai <yukuai3@xxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
Signed-off-by: John Garry <john.g.garry@xxxxxxxxxx>
---
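A note for reviewers, not intended for the commit log: the IS_ERR()
checks rely on bio_split() returning an ERR_PTR on failure rather than
a valid bio. In the read and write error paths we store the errno in
->bi_status and then set R10BIO_Uptodate before calling
raid_end_bio_io(); that flag is what stops the specific error from
being clobbered with a generic one. Abridged sketch of
raid_end_bio_io() as I read it in the current raid10.c (the exact body
may differ slightly):

	static void raid_end_bio_io(struct r10bio *r10_bio)
	{
		struct bio *bio = r10_bio->master_bio;
		struct r10conf *conf = r10_bio->mddev->private;

		/*
		 * Unless R10BIO_Uptodate is set, whatever status the
		 * caller assigned is replaced with a generic I/O error.
		 * The new err_handle paths set the flag so that the
		 * errno_to_blk_status() result survives to bio_endio().
		 */
		if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);

		/* Wake any resync thread waiting for the device to go idle. */
		allow_barrier(conf);

		free_r10bio(r10_bio);
	}

The discard path ends the master bio directly instead because, as far
as I can tell, no r10_bio has been set up yet at the points where
raid10_handle_discard() splits the bio.
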
 drivers/md/raid10.c | 47 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ff73db2f6c41..8c7f5daa073a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1159,6 +1159,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	int slot = r10_bio->read_slot;
 	struct md_rdev *err_rdev = NULL;
 	gfp_t gfp = GFP_NOIO;
+	int error;
 
 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
 		/*
@@ -1206,6 +1207,10 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		submit_bio_noacct(bio);
@@ -1236,6 +1241,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	mddev_trace_remap(mddev, read_bio, r10_bio->sector);
 	submit_bio_noacct(read_bio);
 	return;
+err_handle:
+	atomic_dec(&rdev->nr_pending);
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R10BIO_Uptodate, &r10_bio->state);
+	raid_end_bio_io(r10_bio);
 }
 
 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -1343,9 +1353,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 				 struct r10bio *r10_bio)
 {
 	struct r10conf *conf = mddev->private;
-	int i;
+	int i, k;
 	sector_t sectors;
 	int max_sectors;
+	int error;
 
 	if ((mddev_is_clustered(mddev) &&
 	     md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1478,6 +1489,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	if (r10_bio->sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, r10_bio->sectors,
 					      GFP_NOIO, &conf->bio_split);
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		submit_bio_noacct(bio);
@@ -1499,6 +1514,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		raid10_write_one_disk(mddev, r10_bio, bio, true, i);
 	}
 	one_write_done(r10_bio);
+	return;
+err_handle:
+	for (k = 0; k < i; k++) {
+		int d = r10_bio->devs[k].devnum;
+		struct md_rdev *rdev = conf->mirrors[d].rdev;
+		struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+		if (r10_bio->devs[k].bio) {
+			rdev_dec_pending(rdev, mddev);
+			r10_bio->devs[k].bio = NULL;
+		}
+		if (r10_bio->devs[k].repl_bio) {
+			rdev_dec_pending(rrdev, mddev);
+			r10_bio->devs[k].repl_bio = NULL;
+		}
+	}
+
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R10BIO_Uptodate, &r10_bio->state);
+	raid_end_bio_io(r10_bio);
 }
 
 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -1640,6 +1675,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 		if (remainder) {
 			split_size = stripe_size - remainder;
 			split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+			if (IS_ERR(split)) {
+				bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+				bio_endio(bio);
+				return 0;
+			}
 			bio_chain(split, bio);
 			allow_barrier(conf);
 			/* Resend the fist split part */
@@ -1650,6 +1690,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 		if (remainder) {
 			split_size = bio_sectors(bio) - remainder;
 			split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+			if (IS_ERR(split)) {
+				bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+				bio_endio(bio);
+				return 0;
+			}
 			bio_chain(split, bio);
 			allow_barrier(conf);
 			/* Resend the second split part */
-- 
2.31.1