Add blk_rq_map_user_bvec, which maps a bvec iterator into a bio and
attaches that bio to the request. This helper will be used by nvme for
io_uring passthrough with fixed buffers.

Signed-off-by: Kanchan Joshi <joshi.k@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
 block/blk-map.c        | 71 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  1 +
 2 files changed, 72 insertions(+)

diff --git a/block/blk-map.c b/block/blk-map.c
index d0ff80a9902e..ee17cc78bf00 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -611,6 +611,77 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/* Prepare bio for passthrough IO given an existing bvec iter */
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t iter_count, nr_segs;
+	struct bio *bio;
+	struct bio_vec *bv, *bvec_arr, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	int ret, i;
+
+	iter_count = iov_iter_count(iter);
+	nr_segs = iter->nr_segs;
+
+	if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+	if (rq->cmd_flags & REQ_POLLED) {
+		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
+
+		/* no iovecs to alloc, as we already have a BVEC iterator */
+		bio = bio_alloc_bioset(NULL, 0, opf, GFP_KERNEL,
+					&fs_bio_set);
+		if (!bio)
+			return -ENOMEM;
+	} else {
+		bio = bio_kmalloc(0, GFP_KERNEL);
+		if (!bio)
+			return -ENOMEM;
+		bio_init(bio, NULL, bio->bi_inline_vecs, 0, req_op(rq));
+	}
+	bio_iov_bvec_set(bio, iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvec_arr = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		bv = &bvec_arr[i];
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		if (bytes + bv->bv_len <= iter_count &&
+		    bv->bv_offset + bv->bv_len <= PAGE_SIZE) {
+			nsegs++;
+			bytes += bv->bv_len;
+		} else {
+			ret = -EINVAL;
+			goto out_free;
+		}
+		bvprvp = bv;
+	}
+	return 0;
+out_free:
+	bio_map_put(bio);
+	return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user_bvec);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio:		start of bio list
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9d1af7a0a401..890c342ab3f3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -971,6 +971,7 @@ struct rq_map_data {
 	bool from_user;
 };
 
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter);
 int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
 int blk_rq_map_user_iov(struct request_queue *, struct request *,
-- 
2.25.1
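
[Editor's note, not part of the patch: a minimal sketch of how a caller
that already holds a pinned, bvec-backed buffer might use the new
helper. The function example_map_fixed_buffer and its parameters are
hypothetical; the intended in-tree user is the nvme io_uring passthrough
path, which builds the bvec iterator from an io_uring registered
(fixed) buffer. The exact direction argument to iov_iter_bvec() depends
on the kernel version.]

#include <linux/blk-mq.h>
#include <linux/uio.h>

/* Hypothetical caller: map an already-pinned bvec array into @rq. */
static int example_map_fixed_buffer(struct request *rq,
				    struct bio_vec *bvecs,
				    unsigned int nr_segs, size_t len)
{
	struct iov_iter iter;

	/* Wrap the pinned pages in an ITER_BVEC iterator. */
	iov_iter_bvec(&iter, rq_data_dir(rq), bvecs, nr_segs, len);

	/*
	 * Hand the iterator to the new helper. Unlike blk_rq_map_user_iov(),
	 * no pages are pinned or copied here; the bio built by
	 * blk_rq_map_user_bvec() simply references the existing bvec array.
	 */
	return blk_rq_map_user_bvec(rq, &iter);
}

[Because the pages behind a fixed buffer are already pinned when it is
registered with io_uring, the helper only wraps the existing bvec array
via bio_iov_bvec_set(); it avoids the get_user_pages() and iovec
allocation work that blk_rq_map_user_iov() performs.]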