On Fri, Mar 22, 2019 at 9:14 PM Johannes Thumshirn <jthumshirn@xxxxxxx> wrote: > > From: Jens Axboe <axboe@xxxxxxxxx> > > Kill the BIO_SEG_VALID flag. We should just use ->bi_phys_segments to tell > if it's valid or not. > > This patch uses -1 to signify it's not. > > Signed-off-by: Jens Axboe <axboe@xxxxxxxxx> > [ jth initialize pc bios with 1 phys segment and WARN if we're submitting > bios with -1 segments ] > Signed-off-by: Johannes Thumshirn <jthumshirn@xxxxxxx> > --- > block/bio.c | 15 +++++++++------ > block/blk-core.c | 1 + > block/blk-merge.c | 13 ++++++------- > drivers/md/raid5.c | 2 +- > include/linux/blk_types.h | 1 - > 5 files changed, 17 insertions(+), 15 deletions(-) > > diff --git a/block/bio.c b/block/bio.c > index 71a78d9fb8b7..a79b2bbcffd0 100644 > --- a/block/bio.c > +++ b/block/bio.c > @@ -280,6 +280,7 @@ void bio_init(struct bio *bio, struct bio_vec *table, > unsigned short max_vecs) > { > memset(bio, 0, sizeof(*bio)); > + bio->bi_phys_segments = -1; > atomic_set(&bio->__bi_remaining, 1); > atomic_set(&bio->__bi_cnt, 1); > > @@ -305,6 +306,7 @@ void bio_reset(struct bio *bio) > bio_uninit(bio); > > memset(bio, 0, BIO_RESET_BYTES); > + bio->bi_phys_segments = -1; > bio->bi_flags = flags; > atomic_set(&bio->__bi_remaining, 1); > } > @@ -573,7 +575,7 @@ EXPORT_SYMBOL(bio_put); > > int bio_phys_segments(struct request_queue *q, struct bio *bio) > { > - if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) > + if (unlikely(bio->bi_phys_segments == -1)) > blk_recount_segments(q, bio); > > return bio->bi_phys_segments; > @@ -712,7 +714,10 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page > bvec->bv_len = len; > bvec->bv_offset = offset; > bio->bi_vcnt++; > - bio->bi_phys_segments++; > + if (bio->bi_phys_segments == -1) > + bio->bi_phys_segments = 1; > + else > + bio->bi_phys_segments++; > bio->bi_iter.bi_size += len; > > /* > @@ -731,7 +736,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page > > /* If 
we may be able to merge these biovecs, force a recount */ > if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec)) > - bio_clear_flag(bio, BIO_SEG_VALID); > + bio->bi_phys_segments = -1; > > done: > return len; > @@ -1913,10 +1918,8 @@ void bio_trim(struct bio *bio, int offset, int size) > if (offset == 0 && size == bio->bi_iter.bi_size) > return; > > - bio_clear_flag(bio, BIO_SEG_VALID); > - > + bio->bi_phys_segments = -1; > bio_advance(bio, offset << 9); > - > bio->bi_iter.bi_size = size; > > if (bio_integrity(bio)) > diff --git a/block/blk-core.c b/block/blk-core.c > index 4673ebe42255..53372a16dd7c 100644 > --- a/block/blk-core.c > +++ b/block/blk-core.c > @@ -1514,6 +1514,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, > else if (bio_op(bio) == REQ_OP_DISCARD) > rq->nr_phys_segments = 1; > > + WARN_ON(rq->nr_phys_segments == -1); > rq->__data_len = bio->bi_iter.bi_size; > rq->bio = rq->biotail = bio; > > diff --git a/block/blk-merge.c b/block/blk-merge.c > index 1c9d4f0f96ea..16c2ae69c46b 100644 > --- a/block/blk-merge.c > +++ b/block/blk-merge.c > @@ -343,7 +343,6 @@ void blk_queue_split(struct request_queue *q, struct bio **bio) > /* physical segments can be figured out during splitting */ > res = split ? 
split : *bio; > res->bi_phys_segments = nsegs; > - bio_set_flag(res, BIO_SEG_VALID); > > if (split) { > /* there isn't chance to merge the splitted bio */ > @@ -440,8 +439,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio) > bio->bi_next = NULL; > bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); > bio->bi_next = nxt; > - > - bio_set_flag(bio, BIO_SEG_VALID); > } > > static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, > @@ -620,6 +617,8 @@ static inline int ll_new_hw_segment(struct request_queue *q, > { > int nr_phys_segs = bio_phys_segments(q, bio); > > + if (WARN_ON(nr_phys_segs == -1)) > + nr_phys_segs = 0; > if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) > goto no_merge; > > @@ -651,9 +650,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, > req_set_nomerge(q, req); > return 0; > } > - if (!bio_flagged(req->biotail, BIO_SEG_VALID)) > + if (req->biotail->bi_phys_segments == -1) > blk_recount_segments(q, req->biotail); > - if (!bio_flagged(bio, BIO_SEG_VALID)) > + if (bio->bi_phys_segments == -1) > blk_recount_segments(q, bio); > > return ll_new_hw_segment(q, req, bio); > @@ -673,9 +672,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, > req_set_nomerge(q, req); > return 0; > } > - if (!bio_flagged(bio, BIO_SEG_VALID)) > + if (bio->bi_phys_segments == -1) > blk_recount_segments(q, bio); > - if (!bio_flagged(req->bio, BIO_SEG_VALID)) > + if (req->bio->bi_phys_segments == -1) > blk_recount_segments(q, req->bio); > > return ll_new_hw_segment(q, req, bio); > diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c > index c033bfcb209e..79eb54dcf0f9 100644 > --- a/drivers/md/raid5.c > +++ b/drivers/md/raid5.c > @@ -5247,7 +5247,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) > rcu_read_unlock(); > raid_bio->bi_next = (void*)rdev; > bio_set_dev(align_bi, rdev->bdev); > - bio_clear_flag(align_bi, BIO_SEG_VALID); > + 
align_bi->bi_phys_segments = -1; > > if (is_badblock(rdev, align_bi->bi_iter.bi_sector, > bio_sectors(align_bi), > diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h > index d66bf5f32610..472059e92071 100644 > --- a/include/linux/blk_types.h > +++ b/include/linux/blk_types.h > @@ -215,7 +215,6 @@ struct bio { > /* > * bio flags > */ > -#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ > #define BIO_CLONED 2 /* doesn't own data */ > #define BIO_BOUNCED 3 /* bio is a bounce bio */ > #define BIO_USER_MAPPED 4 /* contains user pages */ Some drivers set their maximum segment count to USHRT_MAX; since -1 would alias that value, will this patch break such drivers? Thanks, Ming Lei