Add a BPF_FUNC_iouring_queue_sqe BPF function as a demonstration of
submitting a new request from a BPF request.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c            | 79 ++++++++++++++++++++++++++++++++++++----
 include/uapi/linux/bpf.h |  1 +
 2 files changed, 72 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 716881ca0b48..2c63a3e68938 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -845,8 +845,14 @@ struct io_op_def {
 	unsigned		work_flags;
 };
 
+struct io_submit_link {
+	struct io_kiocb		*head;
+	struct io_kiocb		*last;
+};
 struct io_bpf_ctx {
+	struct io_ring_ctx	*ctx;
+	struct io_submit_link	link;
 };
 
 static const struct io_op_def io_op_defs[] = {
@@ -6716,11 +6722,6 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 	io_queue_sqe(req, NULL);
 }
 
-struct io_submit_link {
-	struct io_kiocb *head;
-	struct io_kiocb *last;
-};
-
 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 struct io_submit_link *link)
 {
@@ -6951,7 +6952,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
			ret = -EBADF;
	}
 
-	state->ios_left--;
+	if (state->ios_left > 1)
+		state->ios_left--;
	return ret;
 }
 
@@ -10312,10 +10314,63 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
	return ret;
 }
 
+static int io_ebpf_prep_req(struct io_bpf_ctx *bpf_ctx,
+			    const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = bpf_ctx->ctx;
+	struct io_kiocb *req = io_alloc_req(ctx);
+	int ret;
+
+	if (unlikely(!req))
+		return -ENOMEM;
+	if (!percpu_ref_tryget_many(&ctx->refs, 1)) {
+		kmem_cache_free(req_cachep, req);
+		return -EAGAIN;
+	}
+	percpu_counter_add(&current->io_uring->inflight, 1);
+	refcount_add(1, &current->usage);
+
+	ret = io_init_req(ctx, req, sqe);
+	if (unlikely(ret))
+		goto fail_req;
+
+	ret = io_submit_sqe(req, sqe, &bpf_ctx->link);
+	if (!ret)
+		return 0;
+fail_req:
+	io_double_put_req(req);
+	return ret;
+}
+
+BPF_CALL_3(bpf_io_uring_queue_sqe, void *, ctx, const void *, psqe, u32, len)
+{
+	const struct io_uring_sqe *sqe = psqe;
+	struct io_bpf_ctx *bpf_ctx = ctx;
+
+	if (len != sizeof(struct io_uring_sqe))
+		return -EINVAL;
+
+	return io_ebpf_prep_req(bpf_ctx, sqe);
+}
+
+const struct bpf_func_proto bpf_io_uring_queue_sqe_proto = {
+	.func		= bpf_io_uring_queue_sqe,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+};
+
 static const struct bpf_func_proto *
 io_bpf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
-	return bpf_base_func_proto(func_id);
+	switch (func_id) {
+	case BPF_FUNC_iouring_queue_sqe:
+		return &bpf_io_uring_queue_sqe_proto;
+	default:
+		return bpf_base_func_proto(func_id);
+	}
 }
 
 static bool io_bpf_is_valid_access(int off, int size,
@@ -10345,8 +10400,16 @@ static void io_bpf_run(struct io_kiocb *req)
		return;
	}
 
-	memset(&bpf_ctx, 0, sizeof(bpf_ctx));
+	io_submit_state_start(&ctx->submit_state, 1);
+	bpf_ctx.ctx = ctx;
+	bpf_ctx.link.head = NULL;
+
	BPF_PROG_RUN(req->bpf.prog, &bpf_ctx);
+
+	if (bpf_ctx.link.head)
+		io_queue_link_head(bpf_ctx.link.head);
+	io_submit_state_end(&ctx->submit_state, ctx);
+
	io_req_complete(req, 0);
 }
 
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 2f1c0ab097d8..8c7c8f4ad044 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3996,6 +3996,7 @@ union bpf_attr {
	FN(ktime_get_coarse_ns),	\
	FN(ima_inode_hash),		\
	FN(sock_from_file),		\
+	FN(iouring_queue_sqe),		\
	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.24.0
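
For illustration, a minimal eBPF program using the new helper might look
like the sketch below. This is not part of the patch: the section name,
the user_data value, and the attachment mechanics are made up for the
example; only the helper ID (from the FN(iouring_queue_sqe) entry), its
argument contract (ctx, pointer to memory, constant size, per
bpf_io_uring_queue_sqe_proto), and IORING_OP_NOP come from the patch and
the existing io_uring UAPI.

/* Hypothetical example: queue a NOP SQE from inside a BPF request. */
#include <linux/bpf.h>
#include <linux/io_uring.h>
#include <linux/types.h>

/* Declare the helper by its enum value, mirroring the kernel proto:
 * arg1 = program ctx, arg2 = pointer to an SQE, arg3 = its size. */
static long (*bpf_iouring_queue_sqe)(void *ctx, const void *sqe, __u32 len) =
	(void *)BPF_FUNC_iouring_queue_sqe;

__attribute__((section("iouring"), used))
int queue_nop(void *ctx)
{
	struct io_uring_sqe sqe = {};

	sqe.opcode = IORING_OP_NOP;	/* simplest request to demonstrate */
	sqe.user_data = 0xcafe;		/* tag to find the resulting CQE */

	/* len must be exactly sizeof(struct io_uring_sqe); anything else
	 * makes the helper return -EINVAL. */
	return bpf_iouring_queue_sqe(ctx, &sqe, sizeof(sqe));
}

A request queued this way goes through the same io_submit_sqe() path as
a normal submission, so an IOSQE_IO_LINK chain built by the program is
flushed via io_queue_link_head() when io_bpf_run() finishes.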