From: Keith Busch <kbusch@xxxxxxxxxx>

Set the bio's bi_end_io to handle the cleanup so that uring_cmd doesn't
need this complex pdu->{bio,req} switcheroo and restore.

Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
---
 drivers/nvme/host/ioctl.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d24ea2e051564..278c57ee0db91 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -159,6 +159,11 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
+static void nvme_uring_bio_end_io(struct bio *bio)
+{
+	blk_rq_unmap_user(bio);
+}
+
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
@@ -204,6 +209,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		*metap = meta;
 	}
 
+	bio->bi_end_io = nvme_uring_bio_end_io;
 	return ret;
 
 out_unmap:
@@ -249,8 +255,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	if (meta)
 		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
 						meta_len, ret);
-	if (bio)
-		blk_rq_unmap_user(bio);
 	blk_mq_free_request(req);
 
 	if (effects)
@@ -443,10 +447,7 @@ struct nvme_uring_data {
  * Expect build errors if this grows larger than that.
  */
 struct nvme_uring_cmd_pdu {
-	union {
-		struct bio *bio;
-		struct request *req;
-	};
+	struct request *req;
 	u32 meta_len;
 	u32 nvme_status;
 	union {
@@ -482,8 +483,6 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
 	if (pdu->meta_len)
 		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
 					pdu->u.meta, pdu->meta_len, status);
-	if (req->bio)
-		blk_rq_unmap_user(req->bio);
 	blk_mq_free_request(req);
 
 	io_uring_cmd_done(ioucmd, status, result, issue_flags);
@@ -494,9 +493,6 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
 {
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
-	if (pdu->bio)
-		blk_rq_unmap_user(pdu->bio);
-
 	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
 }
 
@@ -507,7 +503,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	void *cookie = READ_ONCE(ioucmd->cookie);
 
-	req->bio = pdu->bio;
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
 		pdu->nvme_status = -EINTR;
 	else
@@ -533,9 +528,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	void *cookie = READ_ONCE(ioucmd->cookie);
 
-	req->bio = pdu->bio;
-	pdu->req = req;
-
 	/*
 	 * For iopoll, complete it directly.
 	 * Otherwise, move the completion to task work.
@@ -624,8 +616,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			req->bio->bi_opf |= REQ_POLLED;
 		}
 	}
-	/* to free bio on completion, as req->bio will be null at that time */
-	pdu->bio = req->bio;
+
+	pdu->req = req;
 	pdu->meta_len = d.metadata_len;
 	req->end_io_data = ioucmd;
 	if (pdu->meta_len) {
-- 
2.34.1
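
For readers less familiar with the block layer, the ownership pattern this
patch moves to can be sketched in plain userspace C. The sketch below is
only an analogue: fake_bio, fake_request, and complete_request() are
invented stand-ins for struct bio, struct request, and the nvme completion
path, and in the real kernel the hook is invoked by bio_endio() when the
bio completes. It is not kernel code, just a minimal illustration of why
registering cleanup on the buffer-owning object removes the need for the
pdu->{bio,req} stash-and-restore.

/*
 * Userspace analogue of the pattern above. All names here are invented
 * for illustration; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	void *mapped_buf;                      /* stands in for the user mapping */
	void (*bi_end_io)(struct fake_bio *);  /* cleanup hook, like bio->bi_end_io */
};

struct fake_request {
	struct fake_bio *bio;
};

/* Analogue of nvme_uring_bio_end_io(): the bio releases its own mapping. */
static void fake_bio_end_io(struct fake_bio *bio)
{
	printf("end_io: releasing mapping %p\n", bio->mapped_buf);
	free(bio->mapped_buf);
	free(bio);
}

/*
 * Completion path: run whatever hook the bio carries. It never needs a
 * stashed copy of the bio pointer (the old pdu->bio) to find what to
 * clean up.
 */
static void complete_request(struct fake_request *req)
{
	if (req->bio && req->bio->bi_end_io)
		req->bio->bi_end_io(req->bio);
	free(req);
}

int main(void)
{
	struct fake_request *req = calloc(1, sizeof(*req));
	struct fake_bio *bio = calloc(1, sizeof(*bio));

	if (!req || !bio)
		return 1;
	bio->mapped_buf = malloc(64);
	bio->bi_end_io = fake_bio_end_io;  /* registered once at map time */
	req->bio = bio;

	complete_request(req);
	return 0;
}

The design point: whoever creates the mapping registers the cleanup on the
object itself, so every completion path (with or without metadata, iopoll
or task work) gets the unmap for free instead of each one remembering to
restore req->bio and call blk_rq_unmap_user().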