On Fri, Sep 02, 2022 at 05:00:51PM -0600, Jens Axboe wrote:
Don't need to rely on the cookie or request type, set the right handler
based on how we're handling the IO.
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
drivers/nvme/host/ioctl.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 7756b439a688..f34abe95821e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -385,25 +385,36 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
io_uring_cmd_done(ioucmd, status, result);
}
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static void nvme_uring_iopoll_cmd_end_io(struct request *req, blk_status_t err)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
/* extract bio before reusing the same field for request */
struct bio *bio = pdu->bio;
- void *cookie = READ_ONCE(ioucmd->cookie);
pdu->req = req;
req->bio = bio;
/*
* For iopoll, complete it directly.
- * Otherwise, move the completion to task work.
*/
- if (cookie != NULL && blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd);
- else
- io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+ nvme_uring_task_cb(ioucmd);
+}
+
+static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ /* extract bio before reusing the same field for request */
+ struct bio *bio = pdu->bio;
+
+ pdu->req = req;
+ req->bio = bio;
+
+ /*
+ * Move the completion to task work.
+ */
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -464,7 +475,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
- req->end_io = nvme_uring_cmd_end_io;
+ if (issue_flags & IO_URING_F_IOPOLL)
+ req->end_io = nvme_uring_iopoll_cmd_end_io;
+ else
+ req->end_io = nvme_uring_cmd_end_io;
The polled handler (nvme_uring_iopoll_cmd_end_io) may also get called in irq
context (from swapper/kworker etc.). Will it be safe to call
nvme_uring_task_cb directly in that case?
We don't touch the user fields in the cmd (thanks to the big CQE), so that
part is sorted. But there is the blk_rq_unmap_user() call - can that, or
anything else inside io_req_complete_post(), cause trouble? The comment on
blk_rq_map_user_iov() says:
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)