Instead use the special DM_MAPIO_KILL return value to return -EIO just
like we do for the request based path.  Note that dm-log-writes returned
-ENOMEM in a few places, which now becomes -EIO instead.  No consumer
treats -ENOMEM special so this shouldn't be an issue (and it should use
a mempool to start with to make guaranteed progress).

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/md/dm-crypt.c         |  4 ++--
 drivers/md/dm-flakey.c        |  4 ++--
 drivers/md/dm-integrity.c     | 12 ++++++------
 drivers/md/dm-log-writes.c    |  4 ++--
 drivers/md/dm-mpath.c         | 13 ++++++++++---
 drivers/md/dm-raid1.c         |  6 +++---
 drivers/md/dm-snap.c          |  8 ++++----
 drivers/md/dm-target.c        |  2 +-
 drivers/md/dm-verity-target.c |  6 +++---
 drivers/md/dm-zero.c          |  4 ++--
 drivers/md/dm.c               | 16 +++++++++++-----
 11 files changed, 46 insertions(+), 33 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ebf9e72d479b..f4b51809db21 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2795,10 +2795,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	 * and is aligned to this size as defined in IO hints.
 	 */
 	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 13305a182611..e8f093b323ce 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -321,7 +321,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 		if (bio_data_dir(bio) == READ) {
 			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
 			    !test_bit(ERROR_WRITES, &fc->flags))
-				return -EIO;
+				return DM_MAPIO_KILL;
 			goto map_bio;
 		}
 
@@ -349,7 +349,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 		/*
 		 * By default, error all I/O.
 		 */
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 map_bio:
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c7f7c8d76576..ee78fb471229 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1352,13 +1352,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
 		      (unsigned long long)ic->provided_data_sectors);
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
 		      ic->sectors_per_block,
 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (ic->sectors_per_block > 1) {
@@ -1368,7 +1368,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 			if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-				return -EIO;
+				return DM_MAPIO_KILL;
 			}
 		}
 	}
@@ -1383,18 +1383,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 			wanted_tag_size *= ic->tag_size;
 			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
 				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-				return -EIO;
+				return DM_MAPIO_KILL;
 			}
 		}
 	} else {
 		if (unlikely(bip != NULL)) {
 			DMERR("Unexpected integrity data when using internal hash");
-			return -EIO;
+			return DM_MAPIO_KILL;
 		}
 	}
 
 	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 4dfe38655a49..e42264706c59 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -586,7 +586,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 		spin_lock_irq(&lc->blocks_lock);
 		lc->logging_enabled = false;
 		spin_unlock_irq(&lc->blocks_lock);
-		return -ENOMEM;
+		return DM_MAPIO_KILL;
 	}
 	INIT_LIST_HEAD(&block->list);
 	pb->block = block;
@@ -639,7 +639,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 			spin_lock_irq(&lc->blocks_lock);
 			lc->logging_enabled = false;
 			spin_unlock_irq(&lc->blocks_lock);
-			return -ENOMEM;
+			return DM_MAPIO_KILL;
 		}
 
 		src = kmap_atomic(bv.bv_page);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b1cb0273b081..94b1aa917fcb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -559,7 +559,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
 		dm_report_EIO(m);
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	mpio->pgpath = pgpath;
@@ -621,11 +621,18 @@ static void process_queued_bios(struct work_struct *work)
 	blk_start_plug(&plug);
 	while ((bio = bio_list_pop(&bios))) {
 		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-		if (r < 0 || r == DM_MAPIO_REQUEUE) {
+		switch (r) {
+		case DM_MAPIO_KILL:
+			r = -EIO;
+			/*FALLTHRU*/
+		case DM_MAPIO_REQUEUE:
 			bio->bi_error = r;
 			bio_endio(bio);
-		} else if (r == DM_MAPIO_REMAPPED)
+			break;
+		case DM_MAPIO_REMAPPED:
 			generic_make_request(bio);
+			break;
+		}
 	}
 	blk_finish_plug(&plug);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 5e30b08b91d9..d9c0c6a77eb5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1207,14 +1207,14 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
 	if (r < 0 && r != -EWOULDBLOCK)
-		return r;
+		return DM_MAPIO_KILL;
 
 	/*
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
 		if (bio->bi_opf & REQ_RAHEAD)
-			return -EIO;
+			return DM_MAPIO_KILL;
 
 		queue_bio(ms, bio, rw);
 		return DM_MAPIO_SUBMITTED;
@@ -1226,7 +1226,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 */
 	m = choose_mirror(ms, bio->bi_iter.bi_sector);
 	if (unlikely(!m))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	dm_bio_record(&bio_record->details, bio);
 	bio_record->m = m;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e152d9817c81..5a7f73f9a6fb 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1690,7 +1690,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
 	if (!s->valid)
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	/* FIXME: should only take write lock if we need
 	 * to copy an exception */
@@ -1698,7 +1698,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
 	    bio_data_dir(bio) == WRITE)) {
-		r = -EIO;
+		r = DM_MAPIO_KILL;
 		goto out_unlock;
 	}
 
@@ -1723,7 +1723,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 		if (!s->valid || s->snapshot_overflowed) {
 			free_pending_exception(pe);
-			r = -EIO;
+			r = DM_MAPIO_KILL;
 			goto out_unlock;
 		}
 
@@ -1741,7 +1741,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 					DMERR("Snapshot overflowed: Unable to allocate exception.");
 				} else
 					__invalidate_snapshot(s, -ENOMEM);
-				r = -EIO;
+				r = DM_MAPIO_KILL;
 				goto out_unlock;
 			}
 		}
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index b242b750542f..c0d7e60820c4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -128,7 +128,7 @@ static void io_err_dtr(struct dm_target *tt)
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-	return -EIO;
+	return DM_MAPIO_KILL;
 }
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 97de961a3bfc..9ed55468b98b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -643,17 +643,17 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
 		DMERR_LIMIT("unaligned io");
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (bio_end_sector(bio) >>
 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
 		DMERR_LIMIT("io out of range");
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (bio_data_dir(bio) == WRITE)
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	io = dm_per_bio_data(bio, ti->per_io_data_size);
 	io->v = v;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b616f11d8473..b65ca8dcfbdc 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -39,7 +39,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 	case REQ_OP_READ:
 		if (bio->bi_opf & REQ_RAHEAD) {
 			/* readahead of null bytes only wastes buffer cache */
-			return -EIO;
+			return DM_MAPIO_KILL;
 		}
 		zero_fill_bio(bio);
 		break;
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 		/* writes get silently dropped */
 		break;
 	default:
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	bio_endio(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ef9500226c0..499f8209bacf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1084,18 +1084,24 @@ static void __map_bio(struct dm_target_io *tio)
 	r = ti->type->map(ti, clone);
 	dm_offload_end(&o);
 
-	if (r == DM_MAPIO_REMAPPED) {
+	switch (r) {
+	case DM_MAPIO_SUBMITTED:
+		break;
+	case DM_MAPIO_REMAPPED:
 		/* the bio has been remapped so dispatch it */
-
 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
 				      tio->io->bio->bi_bdev->bd_dev, sector);
 		generic_make_request(clone);
-	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
+		break;
+	case DM_MAPIO_KILL:
+		r = -EIO;
+		/*FALLTHRU*/
+	case DM_MAPIO_REQUEUE:
 		/* error the io and bail out, or requeue it if needed */
 		dec_pending(tio->io, r);
 		free_tio(tio);
-	} else if (r != DM_MAPIO_SUBMITTED) {
+		break;
+	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
 	}
-- 
2.11.0

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/dm-devel
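
For anyone writing a bio-based target against this interface, a minimal
sketch of the convention the patch establishes (not part of the patch:
the target below is hypothetical, "example_c"/"example_map" and the
write-rejecting policy are made up for illustration, while struct
dm_target, struct dm_dev, dm_target_offset() and the DM_MAPIO_* values
are the real interfaces used above):

#include <linux/device-mapper.h>
#include <linux/bio.h>

/* Hypothetical per-target state; a real target would fill this in ->ctr. */
struct example_c {
	struct dm_dev *dev;		/* underlying device */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;

	/*
	 * Error path: don't return -EIO (or any other errno) from ->map.
	 * Return DM_MAPIO_KILL and let the dm core complete the bio with
	 * -EIO on the target's behalf.
	 */
	if (bio_data_dir(bio) == WRITE)
		return DM_MAPIO_KILL;

	/* Normal path: redirect the bio and let the dm core dispatch it. */
	bio->bi_bdev = ec->dev->bdev;
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}

With that convention the DM_MAPIO_KILL -> -EIO translation happens only
in the __map_bio() and process_queued_bios() switches changed above,
instead of being open-coded in every target.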