On 6/20/21 8:56 PM, Pavel Begunkov wrote:
> On 6/20/21 8:05 PM, Olivier Langlois wrote:
>> It happens quite often that when an operation fails with EAGAIN, the
>> data becomes available between that failure and the call to
>> vfs_poll() done by io_arm_poll_handler().
>>
>> Detecting this situation and reissuing the operation is much faster
>> than pushing the operation to the io-wq.
>>
>> Signed-off-by: Olivier Langlois <olivier@xxxxxxxxxxxxxx>
>> ---
>>  fs/io_uring.c | 26 +++++++++++++++++---------
>>  1 file changed, 17 insertions(+), 9 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index fa8794c61af7..6e037304429a 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -5143,7 +5143,10 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
>>          return mask;
>>  }
>>
>> -static bool io_arm_poll_handler(struct io_kiocb *req)
>> +#define IO_ARM_POLL_OK 0
>> +#define IO_ARM_POLL_ERR 1
>> +#define IO_ARM_POLL_READY 2
>
> Please add a new line here. These can even be moved somewhere
> toward the top, but that's a matter of taste.
>
> Also, how about renaming them to apoll? io_uring's internal
> rw/send/recv polling is often abbreviated that way around
> io_uring.c:
> IO_APOLL_OK and so on.
>
>> +static int io_arm_poll_handler(struct io_kiocb *req)
>>  {
>>          const struct io_op_def *def = &io_op_defs[req->opcode];
>>          struct io_ring_ctx *ctx = req->ctx;
>> @@ -5153,22 +5156,22 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
>>          int rw;
>>
>>          if (!req->file || !file_can_poll(req->file))
>> -                return false;
>> +                return IO_ARM_POLL_ERR;
>
> It's not really an error. Maybe IO_APOLL_ABORTED or so?

fwiw, I mean replacing *_ERR entirely, not only this one return.

>
>>          if (req->flags & REQ_F_POLLED)
>> -                return false;
>> +                return IO_ARM_POLL_ERR;
>>          if (def->pollin)
>>                  rw = READ;
>>          else if (def->pollout)
>>                  rw = WRITE;
>>          else
>> -                return false;
>> +                return IO_ARM_POLL_ERR;
>>          /* if we can't nonblock try, then no point in arming a poll handler */
>>          if (!io_file_supports_async(req, rw))
>> -                return false;
>> +                return IO_ARM_POLL_ERR;
>>
>>          apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
>>          if (unlikely(!apoll))
>> -                return false;
>> +                return IO_ARM_POLL_ERR;
>>          apoll->double_poll = NULL;
>>
>>          req->flags |= REQ_F_POLLED;
>> @@ -5194,12 +5197,12 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
>>          if (ret || ipt.error) {
>>                  io_poll_remove_double(req);
>>                  spin_unlock_irq(&ctx->completion_lock);
>> -                return false;
>> +                return ret?IO_ARM_POLL_READY:IO_ARM_POLL_ERR;
>
> Spaces would be great.
>
>>          }
>>          spin_unlock_irq(&ctx->completion_lock);
>>          trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
>>                                  apoll->poll.events);
>> -        return true;
>> +        return IO_ARM_POLL_OK;
>>  }
>>
>>  static bool __io_poll_remove_one(struct io_kiocb *req,
>> @@ -6416,6 +6419,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
>>          struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
>>          int ret;
>>
>> +issue_sqe:
>>          ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
>>
>>          /*
>> @@ -6435,12 +6439,16 @@ static void __io_queue_sqe(struct io_kiocb *req)
>>                          io_put_req(req);
>>                  }
>>          } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
>> -                if (!io_arm_poll_handler(req)) {
>> +                switch (io_arm_poll_handler(req)) {
>> +                case IO_ARM_POLL_READY:
>> +                        goto issue_sqe;
>
> I checked the assembly; the fast path looks OK (i.e. not affected).
> Also, a note: linked_timeout is handled correctly.
>
>> +                case IO_ARM_POLL_ERR:
>>                          /*
>>                           * Queued up for async execution, worker will release
>>                           * submit reference when the iocb is actually submitted.
>>                           */
>>                          io_queue_async_work(req);
>> +                        break;
>>                  }
>>          } else {
>>                  io_req_complete_failed(req, ret);
>>

-- 
Pavel Begunkov
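For readers following along outside the kernel tree, below is a minimal,
self-contained userspace sketch of the tri-state pattern under discussion,
using the IO_APOLL_* naming suggested in the review. issue() and arm_poll()
are invented stand-ins for io_issue_sqe() and io_arm_poll_handler(), not
kernel APIs; this illustrates the control flow only, not the actual patch.

#include <stdio.h>
#include <errno.h>

/* Tri-state result, mirroring the apoll naming proposed in the review. */
enum {
        IO_APOLL_OK,      /* poll armed; completion arrives via the wakeup */
        IO_APOLL_ABORTED, /* could not arm poll; fall back to async worker */
        IO_APOLL_READY,   /* data showed up while arming; reissue inline */
};

/* Hypothetical stand-in for io_issue_sqe(): fails once, then succeeds. */
static int issue(int *attempts)
{
        return (*attempts)++ == 0 ? -EAGAIN : 0;
}

/* Hypothetical stand-in for io_arm_poll_handler(): pretend vfs_poll()
 * observed the file become ready in the EAGAIN-to-poll window. */
static int arm_poll(void)
{
        return IO_APOLL_READY;
}

int main(void)
{
        int attempts = 0;
        int ret;

issue_sqe:
        ret = issue(&attempts);
        if (ret == -EAGAIN) {
                switch (arm_poll()) {
                case IO_APOLL_READY:
                        goto issue_sqe;         /* fast path: retry inline */
                case IO_APOLL_ABORTED:
                        puts("punting to io-wq"); /* slow path: worker pool */
                        break;
                }
        } else {
                printf("request completed, ret=%d\n", ret);
        }
        return 0;
}

The third state is what buys the speedup: when arming the poll handler
observes that the file became ready between the EAGAIN and the vfs_poll()
call, the request is retried inline via the goto instead of taking the
slower io-wq round trip.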