Hi, here I'm sending the REQ_NOWAIT patch for review. It is not tested, except for some trivial tests that involve logical volume activation. I found out that it seems easier to propagate the error using bits in clone_info rather than changing return codes for each affected function. Mikulas Signed-off-by: Mikulas Patocka <mpatocka@xxxxxxxxxx> --- drivers/md/dm.c | 51 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) Index: linux-2.6/drivers/md/dm.c =================================================================== --- linux-2.6.orig/drivers/md/dm.c +++ linux-2.6/drivers/md/dm.c @@ -87,6 +87,8 @@ struct clone_info { unsigned int sector_count; bool is_abnormal_io:1; bool submit_as_polled:1; + bool is_nowait:1; + bool nowait_failed:1; }; static inline struct dm_target_io *clone_to_tio(struct bio *clone) @@ -570,13 +572,21 @@ static void dm_end_io_acct(struct dm_io dm_io_acct(io, true); } -static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) +static struct dm_io *alloc_io(struct clone_info *ci, struct mapped_device *md, struct bio *bio) { struct dm_io *io; struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); + if (unlikely(ci->is_nowait)) { + clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); + if (!clone) { + ci->nowait_failed = true; + return NULL; + } + } else { + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); + } tio = clone_to_tio(clone); tio->flags = 0; dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); @@ -1503,6 +1513,11 @@ static void alloc_multiple_bios(struct b while ((bio = bio_list_pop(blist))) free_tio(bio); + + if (ci->is_nowait) { + ci->nowait_failed = true; + return; + } } } @@ -1519,7 +1534,15 @@ static unsigned int __send_duplicate_bio case 1: if (len) setup_split_accounting(ci, *len); - clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); + if (unlikely(ci->is_nowait)) { + clone = alloc_tio(ci, ti, 0, len, GFP_NOWAIT); + if (!clone) { + ci->nowait_failed = true; + return 0; + } + } else { + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); + } __map_bio(clone); ret = 1; break; @@ -1656,7 +1679,7 @@ static blk_status_t __process_abnormal_i __send_changing_extent_only(ci, ti, num_bios, max_granularity, max_sectors); - return BLK_STS_OK; + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; } /* @@ -1729,7 +1752,15 @@ static blk_status_t __split_and_process_ len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); setup_split_accounting(ci, len); - clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + if (unlikely(ci->is_nowait)) { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); + if (unlikely(!clone)) { + ci->nowait_failed = true; + return BLK_STS_AGAIN; + } + } else { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + } __map_bio(clone); ci->sector += len; @@ -1741,8 +1772,10 @@ static blk_status_t __split_and_process_ static void init_clone_info(struct clone_info *ci, struct mapped_device *md, struct dm_table *map, struct bio *bio, bool is_abnormal) { + ci->is_nowait = !!(bio->bi_opf & REQ_NOWAIT); + ci->nowait_failed = false; ci->map = map; - ci->io = alloc_io(md, bio); + ci->io = alloc_io(ci, md, bio); ci->bio = bio; ci->is_abnormal_io = is_abnormal; ci->submit_as_polled = false; @@ -1778,10 +1811,16 @@ static void dm_split_and_process_bio(str } init_clone_info(&ci, md, map, bio, is_abnormal); + if (unlikely(ci.nowait_failed)) { + error = BLK_STS_AGAIN; + goto out; + } io = ci.io; if (bio->bi_opf & REQ_PREFLUSH) { __send_empty_flush(&ci); + if (unlikely(ci.nowait_failed)) + error = BLK_STS_AGAIN; /* dm_io_complete submits any data associated with flush */ goto out; } -- dm-devel mailing list dm-devel@xxxxxxxxxx https://listman.redhat.com/mailman/listinfo/dm-devel