There are a bunch of inline helpers that will be useful not only to the
core of io_uring; move them to headers.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 io_uring/io_uring.c | 22 ----------------------
 io_uring/io_uring.h | 22 ++++++++++++++++++++++
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6a94d1682aaf..3fdb368820c9 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -616,14 +616,6 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
-static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
-{
-	if (!*locked) {
-		mutex_lock(&ctx->uring_lock);
-		*locked = true;
-	}
-}
-
 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 {
 	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
@@ -879,15 +871,6 @@ static void io_prep_async_link(struct io_kiocb *req)
 	}
 }
 
-static inline void io_req_add_compl_list(struct io_kiocb *req)
-{
-	struct io_submit_state *state = &req->ctx->submit_state;
-
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		state->flush_cqes = true;
-	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
-}
-
 void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
 {
 	struct io_kiocb *link = io_prep_linked_timeout(req);
@@ -1293,11 +1276,6 @@ static void io_req_complete_post32(struct io_kiocb *req, u64 extra1, u64 extra2)
 	io_cqring_ev_posted(ctx);
 }
 
-static inline void io_req_complete_state(struct io_kiocb *req)
-{
-	req->flags |= REQ_F_COMPLETE_INLINE;
-}
-
 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 {
 	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 3660df80e589..26b669746d61 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -193,6 +193,28 @@ static inline bool io_run_task_work(void)
 	return false;
 }
 
+static inline void io_req_complete_state(struct io_kiocb *req)
+{
+	req->flags |= REQ_F_COMPLETE_INLINE;
+}
+
+static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
+{
+	if (!*locked) {
+		mutex_lock(&ctx->uring_lock);
+		*locked = true;
+	}
+}
+
+static inline void io_req_add_compl_list(struct io_kiocb *req)
+{
+	struct io_submit_state *state = &req->ctx->submit_state;
+
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		state->flush_cqes = true;
+	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
+}
+
 int io_run_task_work_sig(void);
 void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete32(struct io_kiocb *req, unsigned int issue_flags,
--
2.36.1
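
As a usage illustration (not part of the patch): once these helpers live
in io_uring.h, code outside io_uring.c can follow the same call pattern
the core uses. The callback below is hypothetical and only assumes the
semantics visible in this diff; io_foo_tw is a made-up name.

/*
 * Hypothetical task-work callback in a file other than io_uring.c,
 * sketched to show the intended call pattern of the moved helpers.
 */
static void io_foo_tw(struct io_kiocb *req, bool *locked)
{
	/* Take ctx->uring_lock once per task-work batch if not held yet. */
	io_tw_lock(req->ctx, locked);

	/* Mark the request as completed inline under the lock... */
	io_req_complete_state(req);

	/* ...and queue it so its CQE is flushed with the next batch. */
	io_req_add_compl_list(req);
}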