Kill ->cached_sq_dropped and wire DRAIN sequence number correction via
->cq_extra, which is there exactly for that purpose. The user-visible
dropped counter will be populated by incrementing it directly instead of
keeping a copy, similarly to what was done not long ago with cq_overflow.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3baacfe2c9b7..6dd14f4aa5f1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -370,7 +370,6 @@ struct io_ring_ctx {
 		struct io_uring_sqe	*sq_sqes;
 		unsigned		cached_sq_head;
 		unsigned		sq_entries;
-		unsigned		cached_sq_dropped;
 		unsigned long		sq_check_overflow;
 
 		struct list_head	defer_list;
@@ -5994,13 +5993,11 @@ static u32 io_get_sequence(struct io_kiocb *req)
 {
 	struct io_kiocb *pos;
 	struct io_ring_ctx *ctx = req->ctx;
-	u32 total_submitted, nr_reqs = 0;
+	u32 nr_reqs = 0;
 
 	io_for_each_link(pos, req)
 		nr_reqs++;
-
-	total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
-	return total_submitted - nr_reqs;
+	return ctx->cached_sq_head - nr_reqs;
 }
 
 static int io_req_defer(struct io_kiocb *req)
@@ -6701,8 +6698,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 		return &ctx->sq_sqes[head];
 
 	/* drop invalid entries */
-	ctx->cached_sq_dropped++;
-	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
+	ctx->cq_extra--;
+	WRITE_ONCE(ctx->rings->sq_dropped,
+		   READ_ONCE(ctx->rings->sq_dropped) + 1);
 	return NULL;
 }
-- 
2.31.1
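
For context on the accounting this relies on: dropping an invalid SQE still
advances cached_sq_head but will never produce a CQE, so decrementing
cq_extra keeps the drain comparison (roughly seq + cq_extra == cached_cq_tail
in the req_need_defer() path) in balance, which is exactly what subtracting
cached_sq_dropped used to do. Below is a standalone userspace sketch of that
invariant; the model struct, the consume/drop helpers and the
immediate-completion assumption are illustrative only, not kernel code.

/*
 * Standalone userspace sketch (not kernel code) of the drain accounting.
 * Field names mirror io_uring's, but the model and helpers are
 * illustrative assumptions only.
 */
#include <assert.h>
#include <stdio.h>

struct model {
	unsigned int cached_sq_head;	/* SQEs consumed from the SQ ring */
	unsigned int cached_cq_tail;	/* CQEs posted so far */
	int cq_extra;			/* completions that won't map 1:1 to SQEs */
};

/* A valid SQE is consumed and, in this model, completes immediately. */
static void consume_valid_sqe(struct model *m)
{
	m->cached_sq_head++;
	m->cached_cq_tail++;
}

/* An invalid SQE is consumed but dropped: it will never post a CQE. */
static void drop_invalid_sqe(struct model *m)
{
	m->cached_sq_head++;
	m->cq_extra--;		/* what io_get_sqe() does after this patch */
}

int main(void)
{
	struct model m = { 0, 0, 0 };
	unsigned int seq, nr_reqs = 1;

	/* Two ordinary requests followed by one invalid (dropped) entry. */
	consume_valid_sqe(&m);
	consume_valid_sqe(&m);
	drop_invalid_sqe(&m);

	/* Fetch a single, unlinked IOSQE_IO_DRAIN request. */
	m.cached_sq_head++;
	seq = m.cached_sq_head - nr_reqs;	/* io_get_sequence() after the patch */

	/*
	 * req_need_defer()-style check: the drain request may run once
	 * seq + cq_extra == cached_cq_tail, i.e. everything submitted
	 * before it that can complete has completed. The dropped entry
	 * no longer skews the comparison because cq_extra absorbed it.
	 */
	assert(seq + m.cq_extra == m.cached_cq_tail);
	printf("seq=%u cq_extra=%d cq_tail=%u -> drain may proceed\n",
	       seq, m.cq_extra, m.cached_cq_tail);
	return 0;
}

Building and running this (the file name is arbitrary) trips no assert,
matching the behaviour previously obtained by subtracting cached_sq_dropped
from cached_sq_head in io_get_sequence().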