Inside __blk_segment_map_sg(), page-sized bvec mapping is optimized
with a standalone branch. Reuse __blk_bvec_map_sg() to do that instead
of open-coding the mapping.

To make the helper reusable there, __blk_bvec_map_sg() is moved above
__blk_segment_map_sg() and switched from unconditionally taking the
first sglist entry to calling blk_next_sg(), so it also works after
earlier segments have been mapped.

Cc: Omar Sandoval <osandov@xxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-merge.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 9ec704bb58ec..3e934ee9a907 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -493,6 +493,14 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 	return nsegs;
 }
 
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+		struct scatterlist *sglist, struct scatterlist **sg)
+{
+	*sg = blk_next_sg(sg, sglist);
+	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+	return 1;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -511,23 +519,13 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 	} else {
 new_segment:
 		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
-			*sg = blk_next_sg(sg, sglist);
-			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-			(*nsegs) += 1;
+			(*nsegs) += __blk_bvec_map_sg(*bvec, sglist, sg);
 		} else
 			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 	*bvprv = *bvec;
 }
 
-static inline int __blk_bvec_map_sg(struct bio_vec bv,
-		struct scatterlist *sglist, struct scatterlist **sg)
-{
-	*sg = sglist;
-	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
-	return 1;
-}
-
 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 			     struct scatterlist *sglist,
 			     struct scatterlist **sg)
-- 
2.9.5
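
The reuse hinges on blk_next_sg() handing out the first sglist entry
while *sg is still NULL and advancing with sg_next() afterwards, so the
same helper serves both a first mapping into an empty list and the
per-segment loop. Below is a minimal userspace sketch of that
advance-or-start pattern; the struct definitions and the next_sg() and
map_one_bvec() helpers are simplified stand-ins invented for
illustration, not the kernel's actual types or API:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's bio_vec/scatterlist types. */
struct page;				/* left opaque for the sketch */

struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct scatterlist {
	struct page	*page;
	unsigned int	length;
	unsigned int	offset;
};

/*
 * Advance-or-start pattern of blk_next_sg(): hand out the first sglist
 * entry on the first call (*sg == NULL), otherwise move to the next
 * one.  The real helper also clears the end marker and uses sg_next(),
 * which understands chained sg tables; pointer arithmetic is enough
 * for this flat array.
 */
static struct scatterlist *next_sg(struct scatterlist **sg,
				   struct scatterlist *sglist)
{
	return *sg ? *sg + 1 : sglist;
}

/* Same shape as the patched __blk_bvec_map_sg(): map one bvec, count 1. */
static int map_one_bvec(struct bio_vec bv, struct scatterlist *sglist,
			struct scatterlist **sg)
{
	*sg = next_sg(sg, sglist);
	(*sg)->page = bv.bv_page;
	(*sg)->length = bv.bv_len;
	(*sg)->offset = bv.bv_offset;
	return 1;
}

int main(void)
{
	struct bio_vec bvs[] = {
		{ NULL, 4096, 0 },	/* one full page */
		{ NULL,  512, 0 },	/* a partial page */
	};
	struct scatterlist sglist[2] = { { NULL, 0, 0 }, { NULL, 0, 0 } };
	struct scatterlist *sg = NULL;	/* NULL: next_sg() starts at entry 0 */
	int i, nsegs = 0;

	for (i = 0; i < 2; i++)
		nsegs += map_one_bvec(bvs[i], sglist, &sg);

	printf("mapped %d segments: %u and %u bytes\n",
	       nsegs, sglist[0].length, sglist[1].length);
	return 0;
}

The removed body's *sg = sglist assignment corresponds to always
returning entry 0 above, which is only correct for the very first
mapping into a list; routing through blk_next_sg() is what makes the
helper safe to call from __blk_segment_map_sg() after other segments
have already been mapped.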