On Wed, Sep 07, 2022 at 02:51:31PM +0000, Chaitanya Kulkarni wrote:
>> req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
>> -		meta_len, meta_seed, &meta, timeout, vec, 0, 0);
>> +		meta_len, meta_seed, &meta, timeout, vec, 0, 0, NULL, 0);
>> if (IS_ERR(req))
>> 	return PTR_ERR(req);
> 14 arguments to the function!
>
> Kanchan, I'm not singling out this patch; this has crept in over the
> years. I think it is high time we find a way to trim it down.
>
> The least we can do is pass a structure rather than 14 separate
> arguments. Is everyone okay with that?
Maybe it's just me, but there is something about these fields (their
unrelatedness) that makes packing them all into a single container feel
a bit unnatural (or do you already have a good name for such a struct?).
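To make that concrete: a single container would have to lump the
allocation-time knobs together with the mapping-time ones. Roughly
something like this (purely illustrative; the name and layout are made
up, not part of any patch):

struct nvme_user_req_args {
	/* consumed when allocating the request */
	struct nvme_command *cmd;
	unsigned timeout;
	blk_opf_t rq_flags;
	blk_mq_req_flags_t blk_flags;

	/* consumed only when mapping user data */
	void __user *ubuffer;
	unsigned bufflen;
	void __user *meta_buffer;
	unsigned meta_len;
	u32 meta_seed;
	void **metap;
	bool vec;
};

The two halves have nothing to do with each other, which is why a split
along exactly that boundary feels more natural to me.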
So how about we split nvme_alloc_user_request into two instead? That
will also serve the purpose.
Here is a patch that creates:
- a new nvme_alloc_user_request with 5 parameters
- a new nvme_map_user_request with 8 parameters
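With the split, the nvme_submit_user_cmd() side pairs them like this
(lifted from the diff below):

	req = nvme_alloc_user_request(q, cmd, timeout, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
			meta_len, meta_seed, &meta, vec);
	if (ret)
		goto out;

Note the error-handling change that comes with it: since allocation and
mapping are now separate steps, a mapping failure leaves the caller
responsible for freeing the request, hence the new out/out_err labels
at the two call sites.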
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 548aca8b5b9f..cb2fa4db50dd 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -65,18 +65,10 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
}
static struct request *nvme_alloc_user_request(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, unsigned timeout, bool vec,
+ struct nvme_command *cmd, unsigned timeout,
blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
@@ -86,49 +78,61 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
if (timeout)
req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
- if (ubuffer && bufflen) {
- if (!vec)
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- else {
- struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov = fast_iov;
- struct iov_iter iter;
-
- ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
- UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter,
- GFP_KERNEL);
- kfree(iov);
- }
- if (ret)
+static int nvme_map_user_request(struct request *req, void __user *ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (!ubuffer || !bufflen)
+ return 0;
+
+ if (!vec)
+ ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+ GFP_KERNEL);
+ else {
+ struct iovec fast_iov[UIO_FASTIOV];
+ struct iovec *iov = fast_iov;
+ struct iov_iter iter;
+
+ ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
goto out;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
- *metap = meta;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ kfree(iov);
+ }
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
+ meta_seed, req_op(req) == REQ_OP_DRV_OUT);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
}
+ req->cmd_flags |= REQ_INTEGRITY;
+ *metap = meta;
}
- return req;
+ return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
- blk_mq_free_request(req);
- return ERR_PTR(ret);
+ return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
@@ -141,13 +145,16 @@ static int nvme_submit_user_cmd(struct request_queue *q,
struct bio *bio;
int ret;
- req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ req = nvme_alloc_user_request(q, cmd, timeout, 0, 0);
if (IS_ERR(req))
return PTR_ERR(req);
- bio = req->bio;
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, vec);
+ if (ret)
+ goto out;
+ bio = req->bio;
ret = nvme_execute_passthru_rq(req);
if (result)
@@ -157,6 +164,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
meta_len, ret);
if (bio)
blk_rq_unmap_user(bio);
+out:
blk_mq_free_request(req);
return ret;
}
@@ -418,6 +426,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -457,13 +466,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags |= REQ_POLLED;
retry:
- req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, d.timeout_ms ?
- msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
- blk_flags);
+ req = nvme_alloc_user_request(q, &c,
+ d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0,
+ rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
+
+ ret = nvme_map_user_request(req, nvme_to_user_ptr(d.addr),
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, vec);
+ if (ret)
+ goto out_err;
req->end_io = nvme_uring_cmd_end_io;
req->end_io_data = ioucmd;
@@ -486,6 +499,9 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
+out_err:
+ blk_mq_free_request(req);
+ return ret;
}
static bool is_ctrl_ioctl(unsigned int cmd)