Hi,
I also used the debug patch below to run the liburing test cases:
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3bec6057c189..119764d18a61 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5819,6 +5819,13 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
refcount_set(&req->refs, 2);
req->task = NULL;
req->result = 0;
+ req->work.list.next = (void *)0x1;
+ req->work.files = (void *)0x2;
+ req->work.mm = (void *)0x3;
+ req->work.creds = (void *)0x4;
+ req->work.fs = (void *)0x5;
+ req->work.flags = 0x6;
+ req->work.task_pid = 0x7;
The idea is that any code path which touches ->work without initializing it
first will trip over these poison values. All test cases pass.
Regards,
Xiaoguang Wang
Basically the IORING_OP_POLL_ADD command and the async armed poll handlers
for regular commands don't touch io_wq_work, so we only need to do the
io_wq_work copy and restore when REQ_F_WORK_INITIALIZED is set.
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@xxxxxxxxxxxxxxxxx>
---
V3:
drop the REQ_F_WORK_NEED_RESTORE flag introduced in the V2 patch; just
use REQ_F_WORK_INITIALIZED to control whether to do the io_wq_work copy
and restore.
V6:
rebase to io_uring-5.8.
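
For reference, the conditional copy/restore boils down to the pattern below.
This is an illustrative sketch only, not part of the diff; the helper names
io_save_work()/io_restore_work() are invented for this note, while the types
and the REQ_F_WORK_INITIALIZED flag are from io_uring-5.8:

static void io_save_work(struct io_kiocb *req, struct io_wq_work *saved)
{
	/* ->work only holds valid state once REQ_F_WORK_INITIALIZED is set */
	if (req->flags & REQ_F_WORK_INITIALIZED)
		memcpy(saved, &req->work, sizeof(*saved));
}

static void io_restore_work(struct io_kiocb *req, const struct io_wq_work *saved)
{
	/* skip the copy entirely for requests that never touched ->work */
	if (req->flags & REQ_F_WORK_INITIALIZED)
		memcpy(&req->work, saved, sizeof(req->work));
}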
---
fs/io_uring.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bde8b17a7275..3bec6057c189 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4261,7 +4261,8 @@ static void io_async_task_func(struct callback_head *cb)
spin_unlock_irq(&ctx->completion_lock);
/* restore ->work in case we need to retry again */
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll);
if (!canceled) {
@@ -4358,7 +4359,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
return false;
req->flags |= REQ_F_POLLED;
- memcpy(&apoll->work, &req->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&apoll->work, &req->work, sizeof(req->work));
had_io = req->io != NULL;
get_task_struct(current);
@@ -4383,7 +4385,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
if (!had_io)
io_poll_remove_double(req);
spin_unlock_irq(&ctx->completion_lock);
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll);
return false;
}
@@ -4428,7 +4431,9 @@ static bool io_poll_remove_one(struct io_kiocb *req)
* io_req_work_drop_env below when dropping the
* final reference.
*/
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work,
+ sizeof(req->work));
kfree(apoll);
}
}