Basically, the IORING_OP_POLL_ADD command and async armed poll handlers for regular commands don't touch io_wq_work, so there is no need to always do the io_wq_work copy. Add a new flag, REQ_F_WORK_NEED_RESTORE, to control whether the io_wq_work copy is done.

Signed-off-by: Xiaoguang Wang <xiaoguang.wang@xxxxxxxxxxxxxxxxx>
---
 fs/io_uring.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a54b21e6d921..6b9c79048962 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -535,6 +535,7 @@ enum {
 	REQ_F_POLLED_BIT,
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_NO_FILE_TABLE_BIT,
+	REQ_F_WORK_NEED_RESTORE_BIT,

 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -590,6 +591,8 @@ enum {
 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
 	/* doesn't need file table for this request */
 	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
+	/* need restore io_wq_work */
+	REQ_F_WORK_NEED_RESTORE	= BIT(REQ_F_WORK_NEED_RESTORE_BIT),
 };

 struct async_poll {
@@ -4390,7 +4393,10 @@ static void io_async_task_func(struct callback_head *cb)
 	spin_unlock_irq(&ctx->completion_lock);

 	/* restore ->work in case we need to retry again */
-	memcpy(&req->work, &apoll->work, sizeof(req->work));
+	if (req->flags & REQ_F_WORK_NEED_RESTORE)
+		memcpy(&req->work, &apoll->work, sizeof(req->work));
+	else
+		req->work.func = NULL;
 	kfree(apoll);

 	if (!canceled) {
@@ -4487,7 +4493,10 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 		return false;

 	req->flags |= REQ_F_POLLED;
-	memcpy(&apoll->work, &req->work, sizeof(req->work));
+	if (req->work.func) {
+		req->flags |= REQ_F_WORK_NEED_RESTORE;
+		memcpy(&apoll->work, &req->work, sizeof(req->work));
+	}
 	had_io = req->io != NULL;

 	get_task_struct(current);
@@ -4512,7 +4521,10 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 		if (!had_io)
 			io_poll_remove_double(req);
 		spin_unlock_irq(&ctx->completion_lock);
-		memcpy(&req->work, &apoll->work, sizeof(req->work));
+		if (req->flags & REQ_F_WORK_NEED_RESTORE)
+			memcpy(&req->work, &apoll->work, sizeof(req->work));
+		else
+			req->work.func = NULL;
 		kfree(apoll);
 		return false;
 	}
@@ -4557,7 +4569,11 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 		 * io_req_work_drop_env below when dropping the
 		 * final reference.
		 */
-		memcpy(&req->work, &apoll->work, sizeof(req->work));
+		if (req->flags & REQ_F_WORK_NEED_RESTORE)
+			memcpy(&req->work, &apoll->work,
+			       sizeof(req->work));
+		else
+			req->work.func = NULL;
 		kfree(apoll);
 	}
 }
--
2.17.2