io_uring treats all completions the same - they post a completion event,
or more, and anyone waiting on event completions will see each event as
it gets posted. However, some events may be more interesting than
others. For a request and response type model, it's not uncommon to
have send/write requests that are submitted alongside a recv/read type
of request. While the app does want to see a successful send/write
completion eventually, it need not handle it upfront as it would a
recv/read completion, as it isn't time sensitive. Generally, a
send/write completion will just mean that a buffer can get
recycled/reused, whereas a recv/read completion needs acting upon (and
a response sent).

This can be somewhat tricky to handle if many requests and responses
are in flight, and the app generally needs to track the number of
pending sends/writes to be able to sanely wait on just new incoming
recv/read requests. And even with that, an application would still like
to see a completion for a short/failed send/write immediately.

Add infrastructure to account inline completions, such that they can be
deducted from the 'wait_nr' being passed in via a submit_and_wait()
type of situation. Inline completions are ones that complete directly
inline from submission, such as a send to a socket where there's enough
space to accommodate the data being sent.

No functional changes in this patch, as no opcode supports setting
REQ_F_IGNORE_INLINE just yet.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
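As a rough sketch of the usage model this is aimed at (not part of this
patch), assuming liburing: a request/response loop queues a response
send plus a new recv, and passes only the number of interesting
(recv) completions as wait_nr. How a send gets marked to set
REQ_F_IGNORE_INLINE is hypothetical here - no opcode or SQE-level
opt-in exists yet - so treat that part as pseudocode for the intended
flow; the liburing calls themselves are existing API:

#include <liburing.h>

/*
 * Hypothetical example: one response send, one new recv, then wait for
 * the recv only. The mechanism by which the send opts in to "ignore
 * inline completion" is made up (imagine a future per-opcode opt-in).
 */
static int queue_and_wait(struct io_uring *ring, int sockfd,
			  void *rsp, size_t rsp_len,
			  void *buf, size_t buf_len)
{
	struct io_uring_sqe *sqe;

	/* response send: completion only matters for buffer recycling */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_send(sqe, sockfd, rsp, rsp_len, 0);
	io_uring_sqe_set_data64(sqe, 1);	/* app tag: send */

	/* new recv: this is the event the app wants to wake up for */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, buf, buf_len, 0);
	io_uring_sqe_set_data64(sqe, 2);	/* app tag: recv */

	/*
	 * If the send completes inline and is flagged to be ignored, the
	 * kernel adds one to the wait count, so the inline send CQE can't
	 * satisfy this wait by itself - only the recv completion (or a
	 * send that didn't complete inline) will wake us.
	 */
	return io_uring_submit_and_wait(ring, 1);
}

An application would check io_uring_params->features for
IORING_FEAT_IGNORE_INLINE to detect kernel support for this accounting.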
 include/linux/io_uring_types.h |  4 ++++
 include/uapi/linux/io_uring.h  |  1 +
 io_uring/io_uring.c            | 12 +++++++++---
 io_uring/io_uring.h            |  2 ++
 4 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 9c7e1d3f06e5..6eb8b739ea0d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -206,6 +206,7 @@ struct io_submit_state {
 	bool			need_plug;
 	bool			cq_flush;
 	unsigned short		submit_nr;
+	unsigned short		inline_completions;
 	struct blk_plug		plug;
 };
 
@@ -465,6 +466,7 @@ enum {
 	REQ_F_BL_EMPTY_BIT,
 	REQ_F_BL_NO_RECYCLE_BIT,
 	REQ_F_BUFFERS_COMMIT_BIT,
+	REQ_F_IGNORE_INLINE_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -541,6 +543,8 @@ enum {
 	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
 	/* buffer ring head needs incrementing on put */
 	REQ_F_BUFFERS_COMMIT	= IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+	/* if set, ignore these completions for when waiting on events */
+	REQ_F_IGNORE_INLINE	= IO_REQ_FLAG(REQ_F_IGNORE_INLINE_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 86cb385fe0b5..1967f5ab2317 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -554,6 +554,7 @@ struct io_uring_params {
 #define IORING_FEAT_REG_REG_RING	(1U << 13)
 #define IORING_FEAT_RECVSEND_BUNDLE	(1U << 14)
 #define IORING_FEAT_MIN_TIMEOUT		(1U << 15)
+#define IORING_FEAT_IGNORE_INLINE	(1U << 16)
 
 /*
  * io_uring_register(2) opcodes and arguments
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d7ad4ea5f40b..706822db7447 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2239,6 +2239,7 @@ static void io_submit_state_start(struct io_submit_state *state,
 	state->plug_started = false;
 	state->need_plug = max_ios > 2;
 	state->submit_nr = max_ios;
+	state->inline_completions = 0;
 	/* set only head, no need to init link_last in advance */
 	state->link.head = NULL;
 }
@@ -3285,6 +3286,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		size_t, argsz)
 {
 	struct io_ring_ctx *ctx;
+	int inline_complete = 0;
 	struct file *file;
 	long ret;
 
@@ -3349,6 +3351,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			mutex_unlock(&ctx->uring_lock);
 			goto out;
 		}
+		inline_complete = ctx->submit_state.inline_completions;
 		if (flags & IORING_ENTER_GETEVENTS) {
 			if (ctx->syscall_iopoll)
 				goto iopoll_locked;
@@ -3386,8 +3389,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 
 			ret2 = io_get_ext_arg(flags, argp, &ext_arg);
 			if (likely(!ret2)) {
-				min_complete = min(min_complete,
-						   ctx->cq_entries);
+				if (min_complete > ctx->cq_entries)
+					min_complete = ctx->cq_entries;
+				else
+					min_complete += inline_complete;
 				ret2 = io_cqring_wait(ctx, min_complete,
 						      flags, &ext_arg);
 			}
@@ -3674,7 +3679,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
 			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
 			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
-			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;
+			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT |
+			IORING_FEAT_IGNORE_INLINE;
 
 	if (copy_to_user(params, p, sizeof(*p))) {
 		ret = -EFAULT;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9d70b2cf7b1e..bd1d4b6e46f0 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -357,6 +357,8 @@ static inline void io_req_complete_defer(struct io_kiocb *req)
 	lockdep_assert_held(&req->ctx->uring_lock);
 
 	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
+	if (req->flags & REQ_F_IGNORE_INLINE)
+		state->inline_completions++;
 }
 
 static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
-- 
2.45.2