Currently an IO size is limited by the request_queue limit max_sectors.
Limit the size of an atomic write to the queue limit
atomic_write_max_sectors value.

Signed-off-by: John Garry <john.g.garry@xxxxxxxxxx>
---
 block/blk-merge.c | 11 ++++++++++-
 block/blk.h       |  3 +++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 74e9e775f13d..6306a2c82354 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -167,7 +167,16 @@ static inline unsigned get_max_io_size(struct bio *bio,
 {
 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
-	unsigned max_sectors = lim->max_sectors, start, end;
+	unsigned max_sectors, start, end;
+
+	/*
+	 * We ignore lim->max_sectors for atomic writes simply because
+	 * it may be less than the bio size, which we cannot tolerate.
+	 */
+	if (bio->bi_opf & REQ_ATOMIC)
+		max_sectors = lim->atomic_write_max_sectors;
+	else
+		max_sectors = lim->max_sectors;
 
 	if (lim->chunk_sectors) {
 		max_sectors = min(max_sectors,
diff --git a/block/blk.h b/block/blk.h
index 050696131329..6ba8333fcf26 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -178,6 +178,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
 	if (unlikely(op == REQ_OP_WRITE_ZEROES))
 		return q->limits.max_write_zeroes_sectors;
 
+	if (rq->cmd_flags & REQ_ATOMIC)
+		return q->limits.atomic_write_max_sectors;
+
 	return q->limits.max_sectors;
 }
 
--
2.31.1
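
[Editorial illustration, not part of the patch: a minimal standalone
userspace sketch of the limit selection this patch adds in both hunks,
i.e. an atomic write is capped by atomic_write_max_sectors while a
normal IO keeps the max_sectors cap. The struct, flag, and values below
are simplified stand-ins for the kernel's struct queue_limits and
REQ_ATOMIC, chosen here for demonstration only.]

	#include <stdio.h>

	/* stand-in for the kernel's REQ_ATOMIC bio/request flag */
	#define REQ_ATOMIC (1u << 0)

	/* simplified stand-in for the kernel's struct queue_limits */
	struct queue_limits {
		unsigned int max_sectors;		/* normal per-IO cap */
		unsigned int atomic_write_max_sectors;	/* cap for atomic writes */
	};

	/* Pick the per-IO sector cap based on whether the write is atomic. */
	static unsigned int max_io_sectors(const struct queue_limits *lim,
					   unsigned int opf)
	{
		if (opf & REQ_ATOMIC)
			return lim->atomic_write_max_sectors;
		return lim->max_sectors;
	}

	int main(void)
	{
		/* example values only */
		struct queue_limits lim = {
			.max_sectors = 2560,		/* 1280 KiB */
			.atomic_write_max_sectors = 64,	/* 32 KiB */
		};

		printf("normal write cap: %u sectors\n",
		       max_io_sectors(&lim, 0));
		printf("atomic write cap: %u sectors\n",
		       max_io_sectors(&lim, REQ_ATOMIC));
		return 0;
	}

The point of the split is that max_sectors may be smaller than an
atomic bio, and splitting an atomic write would break its all-or-nothing
guarantee, so the atomic limit must be applied instead.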