cur_mm is only used per submission, so it could be placed into io_submit_state. Here is the reasoning behind it: - it's more convenient, don't need to pass it down the call stack - it's passed as a pointer, so in either case needs memory read/write - now uses heap (ctx->submit_state) instead of stack - set only once for non-IORING_SETUP_SQPOLL case. - generates pretty similar code as @ctx is hot and always somewhere in a register Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 951c2fc7b5b7..c0e72390d272 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -219,6 +219,8 @@ struct io_submit_state { struct file *ring_file; int ring_fd; + + struct mm_struct *mm; }; struct io_ring_ctx { @@ -4834,8 +4836,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req, } static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, - struct file *ring_file, int ring_fd, - struct mm_struct **mm, bool async) + struct file *ring_file, int ring_fd, bool async) { struct blk_plug plug; struct io_kiocb *link = NULL; @@ -4883,15 +4884,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, break; } - if (io_op_defs[req->opcode].needs_mm && !*mm) { + if (io_op_defs[req->opcode].needs_mm && !ctx->submit_state.mm) { mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm); if (!mm_fault) { use_mm(ctx->sqo_mm); - *mm = ctx->sqo_mm; + ctx->submit_state.mm = ctx->sqo_mm; } } - req->has_user = *mm != NULL; + req->has_user = (ctx->submit_state.mm != NULL); req->in_async = async; req->needs_fixed_file = async; trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, @@ -4918,7 +4919,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, static int io_sq_thread(void *data) { struct io_ring_ctx *ctx = data; - struct mm_struct *cur_mm = NULL; + struct io_submit_state 
*submit = &ctx->submit_state; const struct cred *old_cred; mm_segment_t old_fs; DEFINE_WAIT(wait); @@ -4993,10 +4994,15 @@ static int io_sq_thread(void *data) * adding ourselves to the waitqueue, as the unuse/drop * may sleep. */ - if (cur_mm) { - unuse_mm(cur_mm); - mmput(cur_mm); - cur_mm = NULL; + if (submit->mm) { + /* + * this thread is the only submitter, thus + * it's safe to change submit->mm without + * taking ctx->uring_lock + */ + unuse_mm(submit->mm); + mmput(submit->mm); + submit->mm = NULL; } prepare_to_wait(&ctx->sqo_wait, &wait, @@ -5027,16 +5033,17 @@ static int io_sq_thread(void *data) } mutex_lock(&ctx->uring_lock); - ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true); + ret = io_submit_sqes(ctx, to_submit, NULL, -1, true); mutex_unlock(&ctx->uring_lock); if (ret > 0) inflight += ret; } set_fs(old_fs); - if (cur_mm) { - unuse_mm(cur_mm); - mmput(cur_mm); + if (submit->mm) { + unuse_mm(submit->mm); + mmput(submit->mm); + submit->mm = NULL; } revert_creds(old_cred); @@ -5757,6 +5764,10 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, mmgrab(current->mm); ctx->sqo_mm = current->mm; + ctx->submit_state.mm = NULL; + if (!(ctx->flags & IORING_SETUP_SQPOLL)) + ctx->submit_state.mm = ctx->sqo_mm; + if (ctx->flags & IORING_SETUP_SQPOLL) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) @@ -6369,8 +6380,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, wake_up(&ctx->sqo_wait); submitted = to_submit; } else if (to_submit) { - struct mm_struct *cur_mm; - if (current->mm != ctx->sqo_mm || current_cred() != ctx->creds) { ret = -EPERM; @@ -6378,10 +6387,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } mutex_lock(&ctx->uring_lock); - /* already have mm, so io_submit_sqes() won't try to grab it */ - cur_mm = ctx->sqo_mm; - submitted = io_submit_sqes(ctx, to_submit, f.file, fd, - &cur_mm, false); + submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false); mutex_unlock(&ctx->uring_lock); if 
(submitted != to_submit) -- 2.24.0