Add more annotations for submission path functions holding ->uring_lock. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 911a223a90e1..0f49736cd2b4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2130,6 +2130,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, } static void io_submit_flush_completions(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) { struct io_comp_state *cs = &ctx->submit_state.comp; int i, nr = cs->nr; @@ -6474,6 +6475,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) } static void __io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { struct io_kiocb *linked_timeout = io_prep_linked_timeout(req); int ret; @@ -6517,6 +6519,7 @@ static void __io_queue_sqe(struct io_kiocb *req) } static inline void io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { if (unlikely(req->ctx->drain_active) && io_drain_req(req)) return; @@ -6561,6 +6564,7 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) + __must_hold(&ctx->uring_lock) { struct io_submit_state *state; unsigned int sqe_flags; @@ -6624,6 +6628,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) + __must_hold(&ctx->uring_lock) { struct io_submit_link *link = &ctx->submit_state.link; int ret; @@ -6756,6 +6761,7 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) } static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) + __must_hold(&ctx->uring_lock) { struct io_uring_task *tctx; int submitted = 0; -- 2.32.0