On 6/3/24 6:04 PM, Gabriel Krisman Bertazi wrote:
> +static void *do_server(void *data)
> +{
> +        struct srv_data *rd = data;
> +
> +        struct io_uring_params p = { };
> +        struct __kernel_timespec ts;
> +        struct io_uring_sqe *sqe;
> +        struct io_uring_cqe *cqe;
> +        struct io_uring ring;
> +        int ret, conn, sock_index;
> +        unsigned head;
> +        int fd, val;
> +        char buf[1024];
> +
> +        ret = t_create_ring_params(4, &ring, &p);
> +        if (ret < 0) {
> +                fprintf(stderr, "queue_init: %s\n", strerror(-ret));
> +                goto err;
> +        }
> +
> +        ret = io_uring_register_files(&ring, &fd, 1);
> +        if (ret) {
> +                fprintf(stderr, "file register %d\n", ret);
> +                goto err;
> +        }
> +
> +        memset(&server_addr, 0, sizeof(struct sockaddr_in));
> +        server_addr.sin_family = AF_INET;
> +        server_addr.sin_port = htons(8000);
> +        server_addr.sin_addr.s_addr = htons(INADDR_ANY);
> +
> +        sock_index = 0;
> +        sqe = io_uring_get_sqe(&ring);
> +        io_uring_prep_socket_direct(sqe, AF_INET, SOCK_STREAM, 0,
> +                                    sock_index, 0);
> +
> +        sqe = io_uring_get_sqe(&ring);
> +        val = 1;
> +        io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, 0,
> +                               SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
> +        sqe->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
> +
> +        sqe = io_uring_get_sqe(&ring);
> +        io_uring_prep_bind(sqe, sock_index, (struct sockaddr *) &server_addr,
> +                           sizeof(struct sockaddr_in));
> +        sqe->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
> +
> +        sqe = io_uring_get_sqe(&ring);
> +        io_uring_prep_listen(sqe, sock_index, 1);
> +        sqe->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
> +
> +        ret = io_uring_submit(&ring);
> +        if (ret < 0) {
> +                printf("submission failed. %d\n", ret);
> +                goto err;
> +        }

Did you test this on older kernels? I suspect they will fail without
IORING_SETUP_SUBMIT_ALL set, and they will fail even if set for kernels
that don't support bind/listen. We need to reliably return T_EXIT_SKIP
for kernels that don't support this feature.
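
Completely untested sketch, but something along these lines at the top of
main() should let the test skip reliably on kernels without the new
opcodes (assuming they are exposed as IORING_OP_BIND and IORING_OP_LISTEN;
IORING_OP_SOCKET/IORING_OP_CONNECT/IORING_OP_SEND could go into the same
check for the client side):

        struct io_uring_probe *probe;

        probe = io_uring_get_probe();
        if (!probe)
                return T_EXIT_SKIP;
        if (!io_uring_opcode_supported(probe, IORING_OP_BIND) ||
            !io_uring_opcode_supported(probe, IORING_OP_LISTEN)) {
                io_uring_free_probe(probe);
                return T_EXIT_SKIP;
        }
        io_uring_free_probe(probe);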

> +static int do_client()
> +{
> +        struct io_uring_sqe *sqe;
> +        struct io_uring_cqe *cqe;
> +        struct sockaddr_in peer_addr;
> +        socklen_t addr_len = sizeof(peer_addr);
> +        struct io_uring ring;
> +        int ret, fd = -1, sock_index;
> +        int i;
> +
> +        ret = io_uring_queue_init(3, &ring, 0);
> +        if (ret < 0) {
> +                fprintf(stderr, "queue_init: %s\n", strerror(-ret));
> +                return -1;
> +        }
> +
> +        ret = io_uring_register_files(&ring, &fd, 1);
> +        if (ret) {
> +                fprintf(stderr, "file register %d\n", ret);
> +                goto err;
> +        }
> +
> +
> +        peer_addr.sin_family = AF_INET;
> +        peer_addr.sin_port = server_addr.sin_port;
> +        peer_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
> +
> +        sock_index = 0;
> +        sqe = io_uring_get_sqe(&ring);
> +        io_uring_prep_socket_direct(sqe, AF_INET, SOCK_STREAM, 0,
> +                                    sock_index, 0);
> +
> +        sqe = io_uring_get_sqe(&ring);
> +        io_uring_prep_connect(sqe, sock_index, (struct sockaddr*) &peer_addr, addr_len);
> +        sqe->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
> +
> +        sqe = io_uring_get_sqe(&ring);
> +
> +        io_uring_prep_send(sqe, sock_index, magic, strlen(magic), 0);
> +        sqe->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
> +
> +        io_uring_submit(&ring);
> +        io_uring_wait_cqe_nr(&ring, &cqe, 3);

Ditto for these, if socket/connect/send aren't supported.

> +
> +        io_uring_for_each_cqe(&ring, i, cqe) {
> +                if (cqe->res < 0) {
> +                        printf("client cqe. idx=%d, %d\n", i, cqe->res);
> +                }
> +        }
> +        io_uring_cq_advance(&ring, 2);
> +
> +        return 0;
> +err:
> +        return -1;
> +}
> +
> +int main(int argc, char *argv[])
> +{
> +        pthread_mutexattr_t attr;
> +        pthread_t srv_thread;
> +        struct srv_data srv_data;
> +        int ret;
> +        void *retval;
> +
> +        if (argc > 1)
> +                return 0;
> +
> +        pthread_mutexattr_init(&attr);
> +        pthread_mutexattr_setpshared(&attr, 1);
> +        pthread_mutex_init(&srv_data.mutex, &attr);
> +        pthread_mutex_lock(&srv_data.mutex);

Probably be better with a barrier rather than a mutex?
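
Untested sketch, assuming a pthread_barrier_t is put in srv_data in place
of the mutex:

        /* in struct srv_data, replacing the mutex */
        pthread_barrier_t barrier;

        /* in main(), before spawning the server thread: two waiters */
        pthread_barrier_init(&srv_data.barrier, NULL, 2);

        /* in do_server(), once the listen has completed */
        pthread_barrier_wait(&rd->barrier);

        /* in main(), before running do_client() */
        pthread_barrier_wait(&srv_data.barrier);

-- 
Jens Axboe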