Add blk_rq_map_user_bvec, which maps the pages from a bvec iterator
into a bio and places the bio into the request. This helper will be
used by nvme for the uring-passthrough path with pre-mapped buffers.

Signed-off-by: Kanchan Joshi <joshi.k@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
 block/blk-map.c        | 80 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  1 +
 2 files changed, 81 insertions(+)

diff --git a/block/blk-map.c b/block/blk-map.c
index a7838879e28e..d6265d49b15b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -622,6 +622,86 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/* Prepare bio for passthrough IO given an existing bvec iter */
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t nr_iter, nr_segs, i;
+	struct bio *bio = NULL;
+	struct bio_vec *bv, *bvecs, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	bool copy = false;
+	int ret;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+
+	/* see if we need to copy pages due to any weird situation */
+	if (blk_queue_may_bounce(q))
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+
+	if (copy) {
+		do {
+			ret = bio_copy_user_iov(rq, NULL, iter, GFP_KERNEL);
+			if (ret) {
+				blk_rq_unmap_user(bio);
+				rq->bio = NULL;
+				break;
+			}
+			if (!bio)
+				bio = rq->bio;
+		} while (iov_iter_count(iter));
+
+		return ret;
+	}
+	/* common (non-copy) case handling */
+	nr_iter = iov_iter_count(iter);
+	nr_segs = iter->nr_segs;
+
+	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = bio_map_get(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio_iov_bvec_set(bio, iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvecs = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		bv = &bvecs[i];
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset))
+			goto put_bio;
+
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+			goto put_bio;
+		if (bytes + bv->bv_len > nr_iter)
+			goto put_bio;
+		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+			goto put_bio;
+
+		nsegs++;
+		bytes += bv->bv_len;
+		bvprvp = bv;
+	}
+	return 0;
+put_bio:
+	bio_map_put(bio);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(blk_rq_map_user_bvec);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio: start of bio list
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 00a15808c137..1a9ae17e49be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -977,6 +977,7 @@ struct rq_map_data {
 	bool from_user;
 };
 
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter);
 int blk_rq_map_user(struct request_queue *, struct request *,
 		struct rq_map_data *, void __user *, unsigned long, gfp_t);
 int blk_rq_map_user_iov(struct request_queue *, struct request *,
-- 
2.25.1
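
[Editor's note, not part of the patch: a rough, untested sketch of how a
caller might drive the new helper with an already-pinned bvec array
(e.g. io_uring fixed buffers). The function name submit_premapped_write()
is invented for illustration, the opcode/direction choices are
assumptions, and the bio teardown on completion, which belongs to the
rest of the series, is deliberately elided.]

#include <linux/blk-mq.h>
#include <linux/uio.h>
#include <linux/err.h>

/*
 * Illustrative only: build a BVEC iterator over pre-mapped pages,
 * attach it to a passthrough request via blk_rq_map_user_bvec(),
 * and execute the request synchronously.
 */
static int submit_premapped_write(struct request_queue *q,
				  struct bio_vec *bvec,
				  unsigned long nr_segs, size_t len)
{
	struct iov_iter iter;
	struct request *rq;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* No pin or copy here: the bvec array already describes the pages. */
	iov_iter_bvec(&iter, WRITE, bvec, nr_segs, len);

	ret = blk_rq_map_user_bvec(rq, &iter);
	if (ret)
		goto out_free;

	status = blk_execute_rq(rq, false);
	ret = blk_status_to_errno(status);

	/* Unmap/teardown of rq->bio is elided in this sketch. */
out_free:
	blk_mq_free_request(rq);
	return ret;
}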