> On 4/12/21 7:55 PM, Changheun Lee wrote:
> > +unsigned int bio_max_size(struct bio *bio)
> > +{
> > +	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
> > +
> > +	if (blk_queue_limit_bio_size(q))
> > +		return blk_queue_get_max_sectors(q, bio_op(bio))
> > +			<< SECTOR_SHIFT;
> > +
> > +	return UINT_MAX;
> > +}
>
> This patch adds an if-statement to the hot path and that may have a
> slight negative performance impact. I recommend to follow the approach
> of max_hw_sectors. That means removing QUEUE_FLAG_LIMIT_BIO_SIZE and to
> initialize the maximum bio size to UINT_MAX in blk_set_default_limits().
>
> Thanks,
>
> Bart.

I modified it to follow Bart's approach. Thanks for your advice. It's
simpler than before, and I think it looks good. Please review below;
I'll prepare a new version based on this.

diff --git a/block/bio.c b/block/bio.c
index 50e579088aca..9e5061ecc317 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -255,6 +255,13 @@ void bio_init(struct bio *bio, struct bio_vec *table,
 }
 EXPORT_SYMBOL(bio_init);
 
+unsigned int bio_max_size(struct bio *bio)
+{
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+
+	return q->limits.bio_max_bytes;
+}
+
 /**
  * bio_reset - reinitialize a bio
  * @bio:	bio to reset
@@ -866,7 +873,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
-			if (bio->bi_iter.bi_size > UINT_MAX - len) {
+			if (bio->bi_iter.bi_size > bio_max_size(bio) - len) {
 				*same_page = false;
 				return false;
 			}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b4aa2f37fab6..b167e8db856b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
+	lim->bio_max_bytes = UINT_MAX;
 	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->max_discard_segments = 1;
 	lim->max_integrity_segments = 0;
@@ -167,6 +168,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 	max_sectors = round_down(max_sectors,
 				 limits->logical_block_size >> SECTOR_SHIFT);
 	limits->max_sectors = max_sectors;
+	limits->bio_max_bytes = max_sectors << SECTOR_SHIFT;
 	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
@@ -538,6 +540,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 {
 	unsigned int top, bottom, alignment, ret = 0;
 
+	t->bio_max_bytes = min_not_zero(t->bio_max_bytes, b->bio_max_bytes);
+
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d0246c92a6e8..e5add63da3af 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -106,6 +106,8 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
+extern unsigned int bio_max_size(struct bio *bio);
+
 /**
  * bio_full - check if the bio is full
  * @bio:	bio to check
@@ -119,7 +121,7 @@ static inline bool bio_full(struct bio *bio, unsigned len)
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return true;
 
-	if (bio->bi_iter.bi_size > UINT_MAX - len)
+	if (bio->bi_iter.bi_size > bio_max_size(bio) - len)
 		return true;
 
 	return false;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 158aefae1030..c205d60ac611 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -312,6 +312,8 @@ enum blk_zoned_model {
 };
 
 struct queue_limits {
+	unsigned int		bio_max_bytes;
+
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;
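
One usage note, in case it helps review: with this approach a driver needs
no new flag or call. A queue that never sets a limit keeps the UINT_MAX
default from blk_set_default_limits(), and a driver's existing
blk_queue_max_hw_sectors() call caps bio_max_bytes as a side effect. A
rough sketch of the driver side (hypothetical driver function, 1 MiB
chosen only for illustration):

#include <linux/blkdev.h>

/*
 * Hypothetical example: a driver caps max_hw_sectors at 2048 sectors
 * (1 MiB). With the change above, blk_queue_max_hw_sectors() also sets
 * limits->bio_max_bytes to max_sectors << SECTOR_SHIFT, so bio_full()
 * and __bio_try_merge_page() stop growing bios at 1 MiB (assuming
 * nothing else lowers max_sectors further).
 */
static void example_driver_set_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);
}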