With commit aa1df3a360a0 ("io_uring: fix CQE reordering"), there are
stronger guarantees for overflow ordering. Specifically, it ensures that
userspace will not receive out-of-order receive CQEs. Therefore the
workaround of disallowing overflow is no longer needed for recv/recvmsg.

Signed-off-by: Dylan Yudaken <dylany@xxxxxxxx>
---
 io_uring/net.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 0d77ddcce0af..4b79b61f5597 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -603,15 +603,11 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 	if (!mshot_finished) {
 		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
-				    cflags | IORING_CQE_F_MORE, false)) {
+				    cflags | IORING_CQE_F_MORE, true)) {
 			io_recv_prep_retry(req);
 			return false;
 		}
-		/*
-		 * Otherwise stop multishot but use the current result.
-		 * Probably will end up going into overflow, but this means
-		 * we cannot trust the ordering anymore
-		 */
+		/* Otherwise stop multishot but use the current result. */
 	}
 
 	io_req_set_res(req, *ret, cflags);
-- 
2.30.2
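
(Illustrative aside, not part of the patch.) For context on what the
multishot recv contract looks like from userspace, below is a minimal,
hypothetical liburing sketch; recv_multishot_loop, sockfd and BUF_GROUP
are made-up names, and it assumes liburing 2.3+, a 6.0+ kernel and a
provided-buffer group registered elsewhere. It only illustrates the
IORING_CQE_F_MORE contract this change leans on: while the flag is set
the recv stays armed, and with overflow now allowed a full CQ ring no
longer forces an early termination and re-arm.

/*
 * Sketch only: consume multishot recv CQEs and watch IORING_CQE_F_MORE.
 * Assumes a connected socket `sockfd` and that buffer group BUF_GROUP
 * has already been registered (e.g. via a provided buffer ring).
 */
#include <errno.h>
#include <stdio.h>
#include <liburing.h>

#define BUF_GROUP	0	/* hypothetical buffer group id */

static int recv_multishot_loop(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;

	/* Arm one multishot recv; buffers are picked from BUF_GROUP. */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BUF_GROUP;
	io_uring_submit(ring);

	for (;;) {
		unsigned int flags;
		int res;

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;

		res = cqe->res;		/* bytes received, or -errno */
		flags = cqe->flags;
		io_uring_cqe_seen(ring, cqe);

		printf("recv CQE: res=%d flags=0x%x\n", res, flags);

		/*
		 * No IORING_CQE_F_MORE means the multishot request has
		 * terminated and userspace must re-arm it.
		 */
		if (!(flags & IORING_CQE_F_MORE))
			return res < 0 ? res : 0;
	}
}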