With IORING_SETUP_CQE32 ->cqe_cached doesn't store a real address but
rather an implicit offset into cqes. Store the real cqe pointer and
increment it accordingly if CQE32.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 io_uring/io_uring.c | 15 ++++++++++-----
 io_uring/io_uring.h |  8 ++------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 263d7e4f1b41..11b4b5040020 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -698,11 +698,8 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
-	unsigned int shift = 0;
 	unsigned int free, queued, len;
 
-	if (ctx->flags & IORING_SETUP_CQE32)
-		shift = 1;
 
 	/* userspace may cheat modifying the tail, be safe and do min */
 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -712,11 +709,19 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
 	if (!len)
 		return NULL;
 
-	ctx->cached_cq_tail++;
+	if (ctx->flags & IORING_SETUP_CQE32) {
+		off <<= 1;
+		len <<= 1;
+	}
+
 	ctx->cqe_cached = &rings->cqes[off];
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
+
+	ctx->cached_cq_tail++;
 	ctx->cqe_cached++;
-	return &rings->cqes[off << shift];
+	if (ctx->flags & IORING_SETUP_CQE32)
+		ctx->cqe_cached++;
+	return &rings->cqes[off];
 }
 
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index cd29d91c2175..f1b3e765495b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -24,14 +24,10 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
 		struct io_uring_cqe *cqe = ctx->cqe_cached;
 
-		if (ctx->flags & IORING_SETUP_CQE32) {
-			unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
-
-			cqe += off;
-		}
-
 		ctx->cached_cq_tail++;
 		ctx->cqe_cached++;
+		if (ctx->flags & IORING_SETUP_CQE32)
+			ctx->cqe_cached++;
 		return cqe;
 	}
 
-- 
2.36.1
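
Not part of the patch, just an illustration of the invariant it switches
to: a minimal, self-contained userspace sketch. All names below (cqe16,
ring_model, get_cqe) are made up for this example; only the pointer
arithmetic mirrors the patched io_get_cqe() fast path, where a 32-byte
CQE occupies two 16-byte slots of cqes[] and ->cqe_cached is therefore
bumped twice per completion.

#include <stdbool.h>
#include <stdio.h>

struct cqe16 {				/* stand-in for a 16-byte struct io_uring_cqe */
	unsigned long long user_data;
	int res;
	unsigned int flags;
};

struct ring_model {
	struct cqe16 cqes[8];		/* slot array; a CQE32 spans two adjacent slots */
	struct cqe16 *cqe_cached;	/* now a real pointer into cqes[], not an offset */
	struct cqe16 *cqe_sentinel;
	bool cqe32;
};

/* mirrors the io_get_cqe() fast path after the patch */
static struct cqe16 *get_cqe(struct ring_model *r)
{
	if (r->cqe_cached < r->cqe_sentinel) {
		struct cqe16 *cqe = r->cqe_cached;

		r->cqe_cached++;
		if (r->cqe32)
			r->cqe_cached++;	/* skip the second half of the big CQE */
		return cqe;
	}
	return NULL;				/* the kernel would refill via __io_get_cqe() */
}

int main(void)
{
	struct ring_model r = { .cqe32 = true };

	/* __io_get_cqe() would set these from the shifted offset and length */
	r.cqe_cached = &r.cqes[0];
	r.cqe_sentinel = &r.cqes[8];

	for (struct cqe16 *cqe; (cqe = get_cqe(&r)) != NULL; )
		printf("big CQE starts at slot %td\n", cqe - r.cqes);
	return 0;
}

With ->cqe_cached holding a real pointer, the fast path no longer has to
recompute an offset from the ring base for CQE32 (the removed "cqe += off"
computation in io_uring.h); the big-CQE case costs one extra increment
instead.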