We have the numbers of {sq,cq} entries cached in ctx; don't look them up
in the user-shared rings because 1) it may fetch an additional cacheline,
and 2) userspace may change them, so it's always error prone.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 31eca208f675..15dc5dad1f7d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1352,7 +1352,7 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
 
-	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
+	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
 }
 
 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
@@ -1370,7 +1370,7 @@ static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 	 * control dependency is enough as we're using WRITE_ONCE to
 	 * fill the cq entry
 	 */
-	if (__io_cqring_events(ctx) == rings->cq_ring_entries)
+	if (__io_cqring_events(ctx) == ctx->cq_entries)
 		return NULL;
 
 	tail = ctx->cached_cq_tail++;
@@ -1423,11 +1423,10 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 /* Returns true if there are no backlogged entries after the flush */
 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
-	struct io_rings *rings = ctx->rings;
 	unsigned long flags;
 	bool all_flushed, posted;
 
-	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
+	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
 		return false;
 
 	posted = false;
-- 
2.31.1
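
Not part of the patch: below is a minimal, self-contained C sketch of the
idea, using stand-in types and names (fake_rings/fake_ctx are illustrative,
not the kernel's struct io_rings / struct io_ring_ctx). The entry count is
copied once into kernel-private state at setup, so later checks never
reread the user-mapped ring header, whose contents userspace could have
modified in the meantime.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ring header that is mmap'ed into userspace. */
struct fake_rings {
	uint32_t sq_tail;
	uint32_t sq_ring_entries;	/* userspace can overwrite this */
};

/* Stand-in for kernel-private per-ring state. */
struct fake_ctx {
	struct fake_rings *rings;
	uint32_t cached_sq_head;
	uint32_t sq_entries;		/* copied once at setup, then trusted */
};

static void fake_setup(struct fake_ctx *ctx, struct fake_rings *rings,
		       uint32_t sq_entries)
{
	ctx->rings = rings;
	ctx->cached_sq_head = 0;
	/* cache the count so later checks don't touch the shared page */
	ctx->sq_entries = sq_entries;
	rings->sq_ring_entries = sq_entries;
	rings->sq_tail = 0;
}

static bool fake_sqring_full(struct fake_ctx *ctx)
{
	/* compare against the cached, kernel-owned value */
	return ctx->rings->sq_tail - ctx->cached_sq_head == ctx->sq_entries;
}

int main(void)
{
	struct fake_rings rings;
	struct fake_ctx ctx;

	fake_setup(&ctx, &rings, 8);
	rings.sq_tail = 8;		/* "userspace" filled the ring */
	rings.sq_ring_entries = 0;	/* ...and scribbled over the count */
	printf("full: %d\n", fake_sqring_full(&ctx));	/* still prints 1 */
	return 0;
}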