percpu_ref_tryget() has its own overhead. Instead of getting a reference
for each request, grab a bunch once per io_submit_sqes(). A basic
benchmark submitting and waiting for 128 non-linked nops showed a ~5%
performance gain (7044 KIOPS vs 7423 KIOPS).

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
Note: it could be done without the @extra_refs variable, but that looked
too tangled because of the gotos.

 fs/io_uring.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index cf4138f0e504..6c85dfc62224 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -845,9 +845,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!percpu_ref_tryget(&ctx->refs))
-		return NULL;
-
 	if (!state) {
 		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
@@ -3929,6 +3926,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
+	unsigned int extra_refs;
 	bool mm_fault = false;
 
 	/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -3941,6 +3939,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		statep = &state;
 	}
 
+	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+		return -EAGAIN;
+	extra_refs = nr;
+
 	for (i = 0; i < nr; i++) {
 		struct io_kiocb *req = io_get_req(ctx, statep);
 
@@ -3949,6 +3951,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			submitted = -EAGAIN;
 			break;
 		}
+		--extra_refs;
 		if (!io_get_sqring(ctx, req)) {
 			__io_free_req(req);
 			break;
@@ -3976,6 +3979,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		io_queue_link_head(link);
 	if (statep)
 		io_submit_state_end(&state);
+	if (extra_refs)
+		percpu_ref_put_many(&ctx->refs, extra_refs);
 
 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
 	io_commit_sqring(ctx);
-- 
2.24.0
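
As a side note, the batched-reference idea in isolation looks roughly like
the minimal sketch below. It is illustrative only, not io_uring code:
struct my_ctx, submit_one() and submit_batch() are hypothetical stand-ins.

/*
 * Minimal sketch of the batching pattern: take nr percpu references up
 * front, hand one to each successfully submitted request, and return
 * whatever is left over in a single put.
 */
#include <linux/percpu-refcount.h>

struct my_ctx {
	struct percpu_ref refs;
};

/* hypothetical: consumes one ctx->refs reference on success */
int submit_one(struct my_ctx *ctx);

static int submit_batch(struct my_ctx *ctx, unsigned int nr)
{
	unsigned int extra_refs, i;
	int submitted = 0;

	/* one tryget for the whole batch instead of one per request */
	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;
	extra_refs = nr;

	for (i = 0; i < nr; i++) {
		if (submit_one(ctx))
			break;
		/* this reference now belongs to the submitted request */
		--extra_refs;
		submitted++;
	}

	/* drop the references that were never handed to a request */
	if (extra_refs)
		percpu_ref_put_many(&ctx->refs, extra_refs);

	return submitted;
}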