(Apparently this went out without my comments attached, only one thing
worth noting so repeating that)

>>> @@ -695,7 +701,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
>>>  		unsigned int cflags;
>>>
>>>  		cflags = io_put_kbuf(req, issue_flags);
>>> -		if (msg->msg_inq && msg->msg_inq != -1)
>>> +		if (msg && msg->msg_inq && msg->msg_inq != -1)
>>>  			cflags |= IORING_CQE_F_SOCK_NONEMPTY;
>>>
>>>  		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
>>> @@ -723,7 +729,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
>>>  			goto enobufs;
>>>
>>>  		/* Known not-empty or unknown state, retry */
>>> -		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
>>> +		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || (msg && msg->msg_inq == -1)) {
>>>  			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
>>>  				return false;
>>>  			/* mshot retries exceeded, force a requeue */
>>
>> Maybe refactor this a bit so that you don't need to add these NULL
>> checks? That seems pretty fragile, hard to read, and should be doable
>> without extra checks.
>
> That chunk can be completely thrown away, we're not using
> io_recv_finish() here anymore

OK good!

>>> @@ -1053,6 +1058,85 @@ struct io_zc_rx_ifq *io_zc_verify_sock(struct io_kiocb *req,
>>>  	return ifq;
>>>  }
>>>
>>> +int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>>> +{
>>> +	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
>>> +
>>> +	/* non-iopoll defer_taskrun only */
>>> +	if (!req->ctx->task_complete)
>>> +		return -EINVAL;
>>
>> What's the reasoning behind this?
>
> CQ locking, see the comment a couple lines below

My question here was more towards "is this something we want to do".
Maybe this is just a temporary work-around and it's nothing to discuss,
but I'm not sure we want to have opcodes only work on certain ring
setups.

-- 
Jens Axboe
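
For context on the "certain ring setups" point: ctx->task_complete is
only set for rings created with IORING_SETUP_DEFER_TASKRUN (and without
IORING_SETUP_IOPOLL), and DEFER_TASKRUN itself requires
IORING_SETUP_SINGLE_ISSUER. A minimal liburing sketch of the ring setup
the posted restriction would force on userspace follows; the queue
depth and error handling are illustrative, not taken from the patch:

	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		int ret;

		/*
		 * The posted io_recvzc_prep() returns -EINVAL unless
		 * ctx->task_complete is set, i.e. unless the ring was
		 * created with DEFER_TASKRUN, which in turn requires
		 * SINGLE_ISSUER. Any other ring setup could not issue
		 * this opcode at all.
		 */
		ret = io_uring_queue_init(8, &ring,
					  IORING_SETUP_SINGLE_ISSUER |
					  IORING_SETUP_DEFER_TASKRUN);
		if (ret < 0) {
			fprintf(stderr, "queue_init: %d\n", ret);
			return 1;
		}

		io_uring_queue_exit(&ring);
		return 0;
	}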