On 6/30/20 6:20 AM, Pavel Begunkov wrote:
> 86b71d0daee05 ("io_uring: deduplicate freeing linked timeouts")
> actually fixed one bug, where io_fail_links() doesn't consider
> REQ_F_COMP_LOCKED, but added another -- io_cqring_fill_event()
> without any locking
>
> Return locking back there and do it right with REQ_F_COMP_LOCKED
> check.

Something like the below is much better, and it also makes it so that
the static analyzers don't throw a fit. I'm going to fold this into
the original.

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e1410ff31892..a0aea78162a6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1600,7 +1600,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 /*
  * Called if REQ_F_LINK_HEAD is set, and we fail the head request
  */
-static void io_fail_links(struct io_kiocb *req)
+static void __io_fail_links(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -1620,6 +1620,23 @@ static void io_fail_links(struct io_kiocb *req)
 	io_cqring_ev_posted(ctx);
 }
 
+static void io_fail_links(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!(req->flags & REQ_F_COMP_LOCKED)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->completion_lock, flags);
+		__io_fail_links(req);
+		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	} else {
+		__io_fail_links(req);
+	}
+
+	io_cqring_ev_posted(ctx);
+}
+
 static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 {
 	if (likely(!(req->flags & REQ_F_LINK_HEAD)))

-- 
Jens Axboe
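
As a standalone illustration of the pattern in the patch above (not io_uring
code; every name below is invented for this sketch), here is a minimal
user-space analogue using pthreads. The worker function assumes the lock is
already held, and only the wrapper decides whether to take it, so the
lock/unlock pair stays balanced inside a single branch -- which is the shape
static analyzers are happy with:

/* build: cc -pthread demo.c -o demo */
#include <pthread.h>
#include <stdio.h>

#define DEMO_F_COMP_LOCKED	(1U << 0)	/* stand-in for REQ_F_COMP_LOCKED */

struct demo_ctx {
	pthread_mutex_t completion_lock;	/* stand-in for ctx->completion_lock */
};

struct demo_req {
	struct demo_ctx *ctx;
	unsigned int flags;
};

/* Does the actual work; expects the completion lock to be held by the caller. */
static void __demo_fail_links(struct demo_req *req)
{
	printf("failing links for req %p\n", (void *)req);
}

/* Wrapper: acquire the lock only if the caller does not already hold it. */
static void demo_fail_links(struct demo_req *req)
{
	struct demo_ctx *ctx = req->ctx;

	if (!(req->flags & DEMO_F_COMP_LOCKED)) {
		pthread_mutex_lock(&ctx->completion_lock);
		__demo_fail_links(req);
		pthread_mutex_unlock(&ctx->completion_lock);
	} else {
		__demo_fail_links(req);
	}
}

int main(void)
{
	struct demo_ctx ctx = { .completion_lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_req req = { .ctx = &ctx, .flags = 0 };

	/* Caller does not hold the lock: the wrapper takes and releases it. */
	demo_fail_links(&req);

	/* Caller already holds the lock: the wrapper must not take it again. */
	pthread_mutex_lock(&ctx.completion_lock);
	req.flags |= DEMO_F_COMP_LOCKED;
	demo_fail_links(&req);
	pthread_mutex_unlock(&ctx.completion_lock);
	return 0;
}

The split mirrors the patch: __io_fail_links() runs with the lock held, and
io_fail_links() alone decides, based on REQ_F_COMP_LOCKED, whether it has to
take ctx->completion_lock first.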