Currently an IO size is limited by the request_queue limit max_sectors.
Limit the size of an atomic write to the queue limit
atomic_write_max_sectors value.

Signed-off-by: John Garry <john.g.garry@xxxxxxxxxx>
---
 block/blk-merge.c | 12 +++++++++++-
 block/blk.h       |  3 +++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0ccc251e22ff..8d4de9253fe9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -171,7 +171,17 @@ static inline unsigned get_max_io_size(struct bio *bio,
 {
 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
-	unsigned max_sectors = lim->max_sectors, start, end;
+	unsigned max_sectors, start, end;
+
+	/*
+	 * We ignore lim->max_sectors for atomic writes simply because
+	 * it may be less than bio->write_atomic_unit, which we cannot
+	 * tolerate.
+	 */
+	if (bio->bi_opf & REQ_ATOMIC)
+		max_sectors = lim->atomic_write_max_sectors;
+	else
+		max_sectors = lim->max_sectors;
 
 	if (lim->chunk_sectors) {
 		max_sectors = min(max_sectors,
diff --git a/block/blk.h b/block/blk.h
index 94e330e9c853..6f6cd5b1830a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -178,6 +178,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
 	if (unlikely(op == REQ_OP_WRITE_ZEROES))
 		return q->limits.max_write_zeroes_sectors;
 
+	if (rq->cmd_flags & REQ_ATOMIC)
+		return q->limits.atomic_write_max_sectors;
+
 	return q->limits.max_sectors;
 }

-- 
2.35.3
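
For illustration, here is a minimal userspace C sketch (not kernel code) of
the limit selection this patch introduces: atomic writes are capped by
atomic_write_max_sectors rather than max_sectors, since max_sectors may be
smaller than the atomic write unit. The struct, flag, and function names
below are simplified stand-ins for the kernel definitions, and the numeric
limits are invented for the example.

	/* Minimal sketch of the REQ_ATOMIC limit selection above. */
	#include <stdio.h>

	#define REQ_ATOMIC	(1u << 0)	/* stand-in for the kernel flag */

	struct queue_limits {
		unsigned int max_sectors;
		unsigned int atomic_write_max_sectors;
	};

	/* Mirrors the selection added to get_max_io_size() and
	 * blk_queue_get_max_sectors(): atomic writes use their own cap. */
	static unsigned int max_io_sectors(const struct queue_limits *lim,
					   unsigned int opf)
	{
		if (opf & REQ_ATOMIC)
			return lim->atomic_write_max_sectors;
		return lim->max_sectors;
	}

	int main(void)
	{
		/* Example device: max_sectors (a soft, tunable limit) is
		 * smaller than the atomic write cap, which is exactly the
		 * situation the patch comment warns about. */
		struct queue_limits lim = {
			.max_sectors			= 1280,	/* 640 KiB */
			.atomic_write_max_sectors	= 2048,	/* 1 MiB */
		};

		printf("normal write cap: %u sectors\n",
		       max_io_sectors(&lim, 0));
		printf("atomic write cap: %u sectors\n",
		       max_io_sectors(&lim, REQ_ATOMIC));
		return 0;
	}

Clamping an atomic write to the plain max_sectors value could split it below
its atomic unit, so the separate cap is selected before any chunk_sectors
clamping is applied.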