From: Christoph Hellwig <hch@xxxxxx>

Divide the work into two helpers, namely nvme_alloc_user_request and
nvme_execute_user_rq. This is a prep patch that will help wire up
uring-cmd support in nvme.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/nvme/host/ioctl.c | 47 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 554566371ffa..3531de8073a6 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -19,6 +19,13 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
 	return (void __user *)ptrval;
 }
 
+static inline void *nvme_meta_from_bio(struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	return bip ? bvec_virt(bip->bip_vec) : NULL;
+}
+
 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
 		unsigned len, u32 seed, bool write)
 {
@@ -53,10 +60,10 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
 	return ERR_PTR(ret);
 }
 
-static int nvme_submit_user_cmd(struct request_queue *q,
+static struct request *nvme_alloc_user_request(struct request_queue *q,
 		struct nvme_command *cmd, void __user *ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+		u32 meta_seed, unsigned timeout, bool vec)
 {
 	bool write = nvme_is_write(cmd);
 	struct nvme_ns *ns = q->queuedata;
@@ -68,7 +75,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 
 	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
 	if (IS_ERR(req))
-		return PTR_ERR(req);
+		return req;
 	nvme_init_request(req, cmd);
 
 	if (timeout)
@@ -108,7 +115,26 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		}
 	}
 
+	return req;
+
+out_unmap:
+	if (bio)
+		blk_rq_unmap_user(bio);
+out:
+	blk_mq_free_request(req);
+	return ERR_PTR(ret);
+}
+
+static int nvme_execute_user_rq(struct request *req, void __user *meta_buffer,
+		unsigned meta_len, u64 *result)
+{
+	struct bio *bio = req->bio;
+	bool write = bio_op(bio) == REQ_OP_DRV_OUT;
+	int ret;
+	void *meta = nvme_meta_from_bio(bio);
+
 	ret = nvme_execute_passthru_rq(req);
+
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
 	if (meta && !ret && !write) {
@@ -116,14 +142,25 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 			ret = -EFAULT;
 	}
 	kfree(meta);
- out_unmap:
 	if (bio)
 		blk_rq_unmap_user(bio);
- out:
 	blk_mq_free_request(req);
 	return ret;
 }
 
+static int nvme_submit_user_cmd(struct request_queue *q,
+		struct nvme_command *cmd, void __user *ubuffer,
+		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+{
+	struct request *req;
+
+	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
+			meta_len, meta_seed, timeout, vec);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	return nvme_execute_user_rq(req, meta_buffer, meta_len, result);
+}
+
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
-- 
2.25.1
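
For readers following the later uring-cmd wiring, the split creates a two-step
contract a caller can compose: nvme_alloc_user_request() either returns a
fully mapped request or an ERR_PTR with nothing left to clean up, and
nvme_execute_user_rq() then owns the request, handling the metadata copy-out,
bio unmap, and request free on every path. The sketch below is illustrative
only; example_passthru is a hypothetical caller, not part of this patch, and
assumes the same file scope as the two static helpers above.

static int example_passthru(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuf, unsigned buflen, u64 *result,
		unsigned timeout)
{
	struct request *req;

	/* Step 1: allocate and map; on failure there is nothing to undo. */
	req = nvme_alloc_user_request(q, cmd, ubuf, buflen, NULL, 0, 0,
			timeout, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/*
	 * Step 2: execute and reap.  nvme_execute_user_rq() owns @req from
	 * here on: it copies metadata back to user space (none in this
	 * sketch), unmaps the bio, and frees the request.  An asynchronous
	 * submitter such as uring-cmd could stash @req here instead of
	 * executing synchronously.
	 */
	return nvme_execute_user_rq(req, NULL, 0, result);
}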