sq->cached_sq_head and cq->cached_cq_tail are both unsigned int. If
cached_sq_head overflows before cached_cq_tail, then we may miss a
barrier req. As cached_cq_tail always follows cached_sq_head, the
not-equal (!=) check should be enough.

Signed-off-by: Zhengyuan Liu <liuzhengyuan@xxxxxxxxxx>
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3e48fd7cd08f..55294ef82102 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -429,7 +429,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
 	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
 		return false;
 
-	return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
+	return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-- 
2.19.1
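
The following is a minimal userspace sketch (not part of the patch) of the
wraparound argument in the commit message. "seq" stands in for req->sequence
(derived from cached_sq_head) and "tail" for cached_cq_tail + sq_ring->dropped;
both are unsigned int as in io_uring, and the concrete values are made up
purely for illustration.

	#include <stdio.h>
	#include <stdbool.h>

	/* Old check: defer while the request's sequence is still "ahead". */
	static bool defer_gt(unsigned int seq, unsigned int tail)
	{
		return seq > tail;
	}

	/* New check: defer until the tail has caught up exactly. */
	static bool defer_ne(unsigned int seq, unsigned int tail)
	{
		return seq != tail;
	}

	int main(void)
	{
		/*
		 * The SQ side has already wrapped past UINT_MAX: the request
		 * was queued with sequence 5, while the CQ tail is still just
		 * below the wrap point and has not reached it yet.
		 */
		unsigned int seq  = 5u;			/* cached_sq_head wrapped */
		unsigned int tail = 0xfffffff0u;	/* cached_cq_tail not yet */

		/* '>' says "don't defer" -> the barrier request is missed. */
		printf("defer with '>'  : %d\n", defer_gt(seq, tail));	/* 0 */

		/* '!=' keeps deferring until tail wraps and reaches seq. */
		printf("defer with '!=' : %d\n", defer_ne(seq, tail));	/* 1 */

		return 0;
	}

Since cached_cq_tail only ever advances toward cached_sq_head, equality is the
natural termination condition and is immune to which counter wraps first.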