On 5/7/22 9:20 AM, Hao Xu wrote:
> @@ -5757,8 +5771,26 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
>  			ret = io_install_fixed_file(req, file, issue_flags,
>  						    accept->file_slot - 1);
>  	}
> -	__io_req_complete(req, issue_flags, ret, 0);
> -	return 0;
> +
> +	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
> +		__io_req_complete(req, issue_flags, ret, 0);
> +		return 0;
> +	}
> +	if (ret >= 0) {
> +		bool filled;
> +
> +		spin_lock(&ctx->completion_lock);
> +		filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
> +					 IORING_CQE_F_MORE);
> +		io_commit_cqring(ctx);
> +		spin_unlock(&ctx->completion_lock);
> +		if (!filled)
> +			return -ECANCELED;
> +		io_cqring_ev_posted(ctx);
> +		goto retry;
> +	}
> +
> +	return ret;

I'd still make that:

	if (filled) {
		io_cqring_ev_posted(ctx);
		goto retry;
	}
	ret = -ECANCELED;

as it flows better and shows what the likely outcome is.

>  static int io_connect_prep_async(struct io_kiocb *req)
> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> index f4d9ca62a5a6..7c3d70d12428 100644
> --- a/include/uapi/linux/io_uring.h
> +++ b/include/uapi/linux/io_uring.h
> @@ -224,9 +224,9 @@ enum {
>  #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
>  
>  /*
> - * accept flags stored in accept_flags
> + * accept flags stored in sqe->ioprio
>   */
> -#define IORING_ACCEPT_MULTISHOT	(1U << 15)
> +#define IORING_ACCEPT_MULTISHOT	(1U << 0)
>  
>  /*
>   * IO completion data structure (Completion Queue Entry)

This hunk needs to get folded into the patch that adds the flag in the
first place.

-- 
Jens Axboe
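
For reference, here is roughly how the multishot branch would read with
that restructuring applied (a sketch assembled from the quoted hunk plus
the suggestion above, not the final committed code):

	if (ret >= 0) {
		bool filled;

		spin_lock(&ctx->completion_lock);
		filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
					 IORING_CQE_F_MORE);
		io_commit_cqring(ctx);
		spin_unlock(&ctx->completion_lock);
		if (filled) {
			/* likely path: CQE posted, re-arm the accept */
			io_cqring_ev_posted(ctx);
			goto retry;
		}
		ret = -ECANCELED;
	}

	return ret;

This keeps the common case (CQE successfully filled, accept re-armed via
the retry label) on the straight-line path, leaving -ECANCELED as the
exceptional fall-through.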
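
On the uapi side, a minimal userspace sketch of arming a multishot accept
under this layout (assuming liburing helpers; the queue setup and
listen_fd are illustrative, only IORING_ACCEPT_MULTISHOT and sqe->ioprio
come from the quoted diff):

	#include <liburing.h>

	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
	/* multishot request flag now lives in sqe->ioprio, bit 0 */
	sqe->ioprio |= IORING_ACCEPT_MULTISHOT;
	io_uring_submit(&ring);

Each accepted connection then posts a CQE with IORING_CQE_F_MORE set
while the request stays armed; a completion without that flag (e.g. the
-ECANCELED case above) means the multishot accept has terminated and must
be resubmitted.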