On Sat, Mar 13, 2021 at 12:30:14PM -0700, Jens Axboe wrote:
> @@ -2851,7 +2852,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
>  		list_del(&kbuf->list);
>  	} else {
>  		kbuf = head;
> -		idr_remove(&req->ctx->io_buffer_idr, bgid);
> +		__xa_erase(&req->ctx->io_buffer, bgid);

Umm ... __xa_erase()?  Did you enable all the lockdep infrastructure?
This should have tripped some of the debugging code because I don't
think you're holding the xa_lock.

> @@ -3993,21 +3994,20 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
>
>  	lockdep_assert_held(&ctx->uring_lock);
>
> -	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
> +	list = head = xa_load(&ctx->io_buffer, p->bgid);
>
>  	ret = io_add_buffers(p, &head);
> -	if (ret < 0)
> -		goto out;
> +	if (ret >= 0 && !list) {
> +		u32 id = -1U;
>
> -	if (!list) {
> -		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
> -				GFP_KERNEL);
> -		if (ret < 0) {
> +		ret = __xa_alloc_cyclic(&ctx->io_buffer, &id, head,
> +					XA_LIMIT(0, USHRT_MAX),
> +					&ctx->io_buffer_next, GFP_KERNEL);

I don't understand why this works.  The equivalent transformation here
would have been:

	ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);

with various options to handle it differently.

>  static void io_destroy_buffers(struct io_ring_ctx *ctx)
>  {
> -	idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
> -	idr_destroy(&ctx->io_buffer_idr);
> +	struct io_buffer *buf;
> +	unsigned long index;
> +
> +	xa_for_each(&ctx->io_buffer, index, buf)
> +		__io_remove_buffers(ctx, buf, index, -1U);
> +	xa_destroy(&ctx->io_buffer);

Honestly, I'd do BUG_ON(!xa_empty(&ctx->io_buffers)) if anything.  If
that loop didn't empty the array, something is terribly wrong and we
should know about it somehow instead of making the memory leak harder
to find.
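
For reference, a minimal sketch of the locking difference raised above,
assuming nothing at that call site already holds the xa_lock (illustrative
only, not the actual io_uring context):

	/* __xa_erase() expects the caller to hold xa_lock ... */
	xa_lock(&req->ctx->io_buffer);
	__xa_erase(&req->ctx->io_buffer, bgid);
	xa_unlock(&req->ctx->io_buffer);

	/* ... whereas xa_erase() takes and drops xa_lock internally */
	xa_erase(&req->ctx->io_buffer, bgid);

Calling __xa_erase() without the lock is what the lockdep-based debugging
mentioned above is expected to catch.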
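
And a hedged sketch of what that xa_insert() transformation might look like
in the io_provide_buffers() hunk; the error-handling branch here is only
illustrative, not a claim about what the final patch should do:

	list = head = xa_load(&ctx->io_buffer, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret >= 0 && !list) {
		/*
		 * Keep the caller-supplied p->bgid as the index.  xa_insert()
		 * takes xa_lock itself and returns -EBUSY if the index is
		 * already occupied, -ENOMEM if allocation fails.
		 */
		ret = xa_insert(&ctx->io_buffer, p->bgid, head, GFP_KERNEL);
		if (ret < 0)
			__io_remove_buffers(ctx, head, p->bgid, -1U);
	}

That keeps the original idr_alloc(bgid, bgid + 1) semantics of inserting at
exactly p->bgid, rather than handing out a new id cyclically.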