The next patch is going to make generic_make_request() handle arbitrarily
sized bios by splitting them if necessary. It makes more sense to call
blk_queue_bounce() first, before the bios have been fragmented. This also
means __blk_recalc_rq_segments() no longer has to take potential bouncing
into account - by the time it runs, bouncing has already been done.

Signed-off-by: Kent Overstreet <koverstreet@xxxxxxxxxx>
Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Jiri Kosina <jkosina@xxxxxxx>
Cc: Asai Thambi S P <asamymuthupa@xxxxxxxxxx>
---
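For reference, a minimal sketch of the resulting submission loop in
generic_make_request(), condensed from the blk-core.c hunk below (queue
setup, plugging and error handling omitted):

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		/*
		 * Bounce highmem pages once, up front, before the bio is
		 * handed to the driver's make_request_fn (and, after the
		 * next patch, before any splitting happens).
		 */
		blk_queue_bounce(q, &bio);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);

With the call hoisted here, blk_queue_bio(), mtip32xx and pktcdvd can drop
their own blk_queue_bounce() calls, as the hunks below do.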
 block/blk-core.c                  | 14 +++++++-------
 block/blk-merge.c                 | 15 ++++-----------
 drivers/block/mtip32xx/mtip32xx.c |  2 --
 drivers/block/pktcdvd.c           |  2 --
 4 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 0704c5c..4d6eb60 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1470,13 +1470,6 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct request *req;
 	unsigned int request_count = 0;
 
-	/*
-	 * low level driver can indicate that it wants pages above a
-	 * certain limit bounced to low memory (ie for highmem, or even
-	 * ISA dma in theory)
-	 */
-	blk_queue_bounce(q, &bio);
-
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_endio(bio, -EIO);
 		return;
@@ -1828,6 +1821,13 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
+		/*
+		 * low level driver can indicate that it wants pages above a
+		 * certain limit bounced to low memory (ie for highmem, or even
+		 * ISA dma in theory)
+		 */
+		blk_queue_bounce(q, &bio);
+
 		q->make_request_fn(q, bio);
 
 		bio = bio_list_pop(current->bio_list);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b53ddac..ba48830 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv;
-	int cluster, high, highprv = 1;
+	int cluster, prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -27,15 +27,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
-			/*
-			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since that
-			 * might change with the bounce page.
-			 */
-			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-			if (high || highprv)
-				goto new_segment;
-			if (cluster) {
+			if (prev && cluster) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -46,6 +38,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
+			prev = 1;
 			continue;
 		}
 new_segment:
@@ -55,8 +48,8 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
+			prev = 1;
 			seg_size = bv.bv_len;
-			highprv = high;
 		}
 		bbio = bio;
 	}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9fd1751..089cda3 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3912,8 +3912,6 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
 	sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
 	if (likely(sg != NULL)) {
-		blk_queue_bounce(queue, &bio);
-
 		if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
 			dev_warn(&dd->pdev->dev,
 				"Maximum number of SGL entries exceeded\n");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a929817..3f6ad2b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2486,8 +2486,6 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		goto end_io;
 	}
 
-	blk_queue_bounce(q, &bio);
-
 	do {
 		sector_t zone = ZONE(bio->bi_iter.bi_sector, pd);
 		sector_t last_zone = ZONE(bio_end_sector(bio) - 1, pd);
-- 
1.8.3.rc1