Add blk_rq_map_user_bvec which maps the bvec iterator into a bio and
places that into the request. This helper will be used in nvme for
uring-passthrough with fixed-buffer.
While at it, create another helper bio_map_get to reduce the code
duplication.

Signed-off-by: Kanchan Joshi <joshi.k@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
 block/blk-map.c        | 111 +++++++++++++++++++++++++++++++++++++----
 include/linux/blk-mq.h |   1 +
 2 files changed, 102 insertions(+), 10 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 7693f8e3c454..d6265d49b15b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -241,17 +241,10 @@ static void bio_map_put(struct bio *bio)
 	}
 }
 
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs,
 		gfp_t gfp_mask)
 {
-	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
-	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 	struct bio *bio;
-	int ret;
-	int j;
-
-	if (!iov_iter_count(iter))
-		return -EINVAL;
 
 	if (rq->cmd_flags & REQ_POLLED) {
 		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
 					&fs_bio_set);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 	} else {
 		bio = bio_kmalloc(nr_vecs, gfp_mask);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 	}
+	return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+		gfp_t gfp_mask)
+{
+	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+	struct bio *bio;
+	int ret;
+	int j;
+
+	if (!iov_iter_count(iter))
+		return -EINVAL;
+
+	bio = bio_map_get(rq, nr_vecs, gfp_mask);
+	if (bio == NULL)
+		return -ENOMEM;
 
 	while (iov_iter_count(iter)) {
 		struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -611,6 +622,86 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/* Prepare bio for passthrough IO given an existing bvec iter */
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t nr_iter, nr_segs, i;
+	struct bio *bio = NULL;
+	struct bio_vec *bv, *bvecs, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	bool copy = false;
+	int ret;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+
+	/* see if we need to copy pages due to any weird situation */
+	if (blk_queue_may_bounce(q))
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+
+	if (copy) {
+		do {
+			ret = bio_copy_user_iov(rq, NULL, iter, GFP_KERNEL);
+			if (ret) {
+				blk_rq_unmap_user(bio);
+				rq->bio = NULL;
+				break;
+			}
+			if (!bio)
+				bio = rq->bio;
+		} while (iov_iter_count(iter));
+
+		return ret;
+	}
+	/* common (non-copy) case handling */
+	nr_iter = iov_iter_count(iter);
+	nr_segs = iter->nr_segs;
+
+	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = bio_map_get(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio_iov_bvec_set(bio, iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvecs = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		bv = &bvecs[i];
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset))
+			goto put_bio;
+
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+			goto put_bio;
+		if (bytes + bv->bv_len > nr_iter)
+			goto put_bio;
+		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+			goto put_bio;
+
+		nsegs++;
+		bytes += bv->bv_len;
+		bvprvp = bv;
+	}
+	return 0;
+put_bio:
+	bio_map_put(bio);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(blk_rq_map_user_bvec);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio:	start of bio list
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 00a15808c137..1a9ae17e49be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -977,6 +977,7 @@ struct rq_map_data {
 	bool from_user;
 };
 
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter);
 int blk_rq_map_user(struct request_queue *, struct request *,
 		struct rq_map_data *, void __user *, unsigned long, gfp_t);
 int blk_rq_map_user_iov(struct request_queue *, struct request *,
-- 
2.25.1
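
For context, a caller of the new helper is expected to look roughly like
the sketch below. This is illustrative only and not part of the patch:
example_map_fixed_buffer and the way the bio_vec array is obtained are
made up here; in the actual nvme uring-passthrough series the bvecs come
from io_uring's registered (fixed) buffers. The sketch only relies on the
existing iov_iter_bvec() API to build an ITER_BVEC iterator over pages
that are already pinned:

#include <linux/blk-mq.h>
#include <linux/uio.h>

/*
 * Hypothetical caller sketch (not part of this patch): hand an
 * already-pinned buffer, described by a bio_vec array, down to
 * blk_rq_map_user_bvec(). No page pinning and no iovec allocation
 * happens on this path.
 */
static int example_map_fixed_buffer(struct request *rq,
				    struct bio_vec *bvecs,
				    unsigned int nr_bvecs, size_t len)
{
	struct iov_iter iter;

	/* direction follows the request: WRITE for data-out, READ for data-in */
	iov_iter_bvec(&iter, rq_data_dir(rq), bvecs, nr_bvecs, len);

	/* builds a bio around the existing bvecs and attaches it to rq */
	return blk_rq_map_user_bvec(rq, &iter);
}

Since blk_rq_map_user_bvec() itself performs the alignment, size and
gap checks shown above, such a caller only has to supply the ITER_BVEC
iterator; on success the request is ready for the usual passthrough
submission path.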