This adds support for fuse request completion through ring SQEs
(FUSE_URING_REQ_COMMIT_AND_FETCH handling). After committing the result
of a request, the ring entry becomes available again for new fuse
requests. Handling of requests through the ring (SQE/CQE handling) is
now complete. Fuse request data are copied through the mmapped ring
buffer; there is no support for zero copy yet.

Signed-off-by: Bernd Schubert <bschubert@xxxxxxx>
---
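For illustration, below is a minimal sketch of the userspace side of the
commit-and-fetch cycle (not part of the applied patch). It assumes
liburing, a ring created with IORING_SETUP_SQE128 so the fuse command
payload fits into the big SQE, and the struct fuse_uring_cmd_req layout
(qid/tag plus buf_ptr/buf_len of the mmapped buffer) from the earlier
patches of this series; error handling is trimmed:

  #include <liburing.h>
  #include <string.h>

  static int fuse_uring_commit_and_fetch(struct io_uring *uring,
                                         int fuse_dev_fd,
                                         const struct fuse_uring_cmd_req *cmd_req)
  {
          struct io_uring_sqe *sqe = io_uring_get_sqe(uring);

          if (!sqe)
                  return -EAGAIN;

          /* prepare an uring-cmd SQE against the fuse device */
          io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fuse_dev_fd, NULL, 0, 0);
          sqe->cmd_op = FUSE_URING_REQ_COMMIT_AND_FETCH;

          /*
           * cmd_req identifies qid/tag and the mmapped request buffer;
           * the reply header and arguments were already written into
           * that buffer before submitting.
           */
          memcpy(sqe->cmd, cmd_req, sizeof(*cmd_req));

          /* one submission commits the result and re-arms the entry */
          return io_uring_submit(uring);
  }

The CQE for this SQE only arrives once the kernel has assigned the next
fuse request to the entry, so a populated ring needs no separate
read()-like fetch step.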
 fs/fuse/dev_uring.c   | 398 ++++++++++++++++++++++++++++++++++++++++++++++++
 fs/fuse/dev_uring_i.h |  21 +++
 2 files changed, 419 insertions(+)

diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 46c2274193bf..96347751668e 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -29,6 +29,26 @@
 #include <linux/topology.h>
 #include <linux/io_uring/cmd.h>
 
+struct fuse_uring_cmd_pdu {
+	struct fuse_ring_ent *ring_ent;
+};
+
+/*
+ * Finalize a fuse request, then fetch and send the next entry, if available
+ */
+static void fuse_uring_req_end(struct fuse_ring_ent *ring_ent,
+			       bool set_err, int error)
+{
+	struct fuse_req *req = ring_ent->fuse_req;
+
+	if (set_err)
+		req->out.h.error = error;
+
+	clear_bit(FR_SENT, &req->flags);
+	fuse_request_end(ring_ent->fuse_req);
+	ring_ent->fuse_req = NULL;
+}
+
 static int fuse_ring_ring_ent_unset_userspace(struct fuse_ring_ent *ent)
 {
 	if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
@@ -40,6 +60,13 @@ static int fuse_ring_ring_ent_unset_userspace(struct fuse_ring_ent *ent)
 	return 0;
 }
 
+static void
+fuse_uring_async_send_to_ring(struct io_uring_cmd *cmd,
+			      unsigned int issue_flags)
+{
+	io_uring_cmd_done(cmd, 0, 0, issue_flags);
+}
+
 /* Update conn limits according to ring values */
 static void fuse_uring_conn_cfg_limits(struct fuse_ring *ring)
 {
@@ -65,6 +92,9 @@ static void fuse_uring_queue_cfg(struct fuse_ring_queue *queue, int qid,
 
 	INIT_LIST_HEAD(&queue->sync_ent_avail_queue);
 	INIT_LIST_HEAD(&queue->async_ent_avail_queue);
+	INIT_LIST_HEAD(&queue->ent_in_userspace);
+	INIT_LIST_HEAD(&queue->sync_fuse_req_queue);
+	INIT_LIST_HEAD(&queue->async_fuse_req_queue);
 
 	for (tag = 0; tag < ring->queue_depth; tag++) {
 		struct fuse_ring_ent *ent = &queue->ring_ent[tag];
@@ -173,6 +203,200 @@ int fuse_uring_conn_cfg(struct file *file, void __user *argp)
 	return res;
 }
 
+/*
+ * Check the reply header for errors and store the result in the request
+ */
+static int fuse_uring_out_header_has_err(struct fuse_out_header *oh,
+					 struct fuse_req *req,
+					 struct fuse_conn *fc)
+{
+	int err;
+
+	if (oh->unique == 0) {
+		/* Not supported through request based uring, this needs another
+		 * ring from user space to kernel
+		 */
+		pr_warn("Unsupported fuse-notify\n");
+		err = -EINVAL;
+		goto seterr;
+	}
+
+	if (oh->error <= -512 || oh->error > 0) {
+		err = -EINVAL;
+		goto seterr;
+	}
+
+	if (oh->error) {
+		err = oh->error;
+		pr_devel("%s:%d err=%d op=%d req-ret=%d\n", __func__, __LINE__,
+			 err, req->args->opcode, req->out.h.error);
+		goto err; /* error already set */
+	}
+
+	if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
+		pr_warn("Unexpected seqno mismatch, expected: %llu got %llu\n",
+			req->in.h.unique, oh->unique & ~FUSE_INT_REQ_BIT);
+		err = -ENOENT;
+		goto seterr;
+	}
+
+	/* Is it an interrupt reply ID? */
+	if (oh->unique & FUSE_INT_REQ_BIT) {
+		err = 0;
+		if (oh->error == -ENOSYS)
+			fc->no_interrupt = 1;
+		else if (oh->error == -EAGAIN) {
+			/* XXX Interrupts not handled yet */
+			/* err = queue_interrupt(req); */
+			pr_warn("Interrupt EAGAIN not supported yet\n");
+			err = -EINVAL;
+		}
+
+		goto seterr;
+	}
+
+	return 0;
+
+seterr:
+	pr_devel("%s:%d err=%d op=%d req-ret=%d\n", __func__, __LINE__, err,
+		 req->args->opcode, req->out.h.error);
+	oh->error = err;
+err:
+	pr_devel("%s:%d err=%d op=%d req-ret=%d\n", __func__, __LINE__, err,
+		 req->args->opcode, req->out.h.error);
+	return err;
+}
+
+static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
+				     struct fuse_req *req,
+				     struct fuse_ring_ent *ent)
+{
+	struct fuse_ring_req __user *rreq = ent->rreq;
+	struct fuse_copy_state cs;
+	struct fuse_args *args = req->args;
+	struct iov_iter iter;
+	int err;
+	int res_arg_len;
+
+	err = copy_from_user(&res_arg_len, &rreq->in_out_arg_len,
+			     sizeof(res_arg_len));
+	if (err)
+		return -EFAULT;
+
+	err = import_ubuf(ITER_SOURCE, (void __user *)&rreq->in_out_arg,
+			  ent->max_arg_len, &iter);
+	if (err)
+		return err;
+
+	fuse_copy_init(&cs, 0, &iter);
+	cs.is_uring = 1;
+	cs.req = req;
+
+	return fuse_copy_out_args(&cs, args, res_arg_len);
+}
+
+/*
+ * Copy data from the req to the ring buffer
+ */
+static int fuse_uring_copy_to_ring(struct fuse_ring *ring, struct fuse_req *req,
+				   struct fuse_ring_ent *ent)
+{
+	struct fuse_ring_req __user *rreq = ent->rreq;
+	struct fuse_copy_state cs;
+	struct fuse_args *args = req->args;
+	int err, res;
+	struct iov_iter iter;
+
+	err = import_ubuf(ITER_DEST, (void __user *)&rreq->in_out_arg,
+			  ent->max_arg_len, &iter);
+	if (err) {
+		pr_info("Import of user buffer failed\n");
+		return err;
+	}
+
+	fuse_copy_init(&cs, 1, &iter);
+	cs.is_uring = 1;
+	cs.req = req;
+	err = fuse_copy_args(&cs, args->in_numargs, args->in_pages,
+			     (struct fuse_arg *)args->in_args, 0);
+	if (err) {
+		pr_info("%s fuse_copy_args failed\n", __func__);
+		return err;
+	}
+
+	BUILD_BUG_ON(sizeof(rreq->in_out_arg_len) != sizeof(cs.ring.offset));
+	res = copy_to_user(&rreq->in_out_arg_len, &cs.ring.offset,
+			   sizeof(rreq->in_out_arg_len));
+	err = res > 0 ? -EFAULT : res;
+
+	return err;
+}
+
+static int
+fuse_uring_prepare_send(struct fuse_ring_ent *ring_ent)
+{
+	struct fuse_ring_req __user *rreq = ring_ent->rreq;
+	struct fuse_ring_queue *queue = ring_ent->queue;
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_req *req = ring_ent->fuse_req;
+	int err = 0, res;
+
+	if (WARN_ON(ring_ent->state != FRRS_FUSE_REQ)) {
+		pr_err("qid=%d tag=%d ring-req=%p buf_req=%p invalid state %d on send\n",
+		       queue->qid, ring_ent->tag, ring_ent, rreq,
+		       ring_ent->state);
+		err = -EIO;
+	}
+
+	if (err)
+		return err;
+
+	pr_devel("%s qid=%d tag=%d state=%d cmd-done op=%d unique=%llu\n",
+		 __func__, queue->qid, ring_ent->tag, ring_ent->state,
+		 req->in.h.opcode, req->in.h.unique);
+
+	/* copy the request arguments */
+	err = fuse_uring_copy_to_ring(ring, req, ring_ent);
+	if (unlikely(err)) {
+		pr_info("Copy to ring failed: %d\n", err);
+		goto err;
+	}
+
+	/* copy the fuse_in_header */
+	res = copy_to_user(&rreq->in, &req->in.h, sizeof(rreq->in));
+	err = res > 0 ? -EFAULT : res;
+	if (err)
+		goto err;
+
+	set_bit(FR_SENT, &req->flags);
+	return 0;
+
+err:
+	fuse_uring_req_end(ring_ent, true, err);
+	return err;
+}
+
+/*
+ * Write data to the ring buffer and send the request to userspace,
+ * which will read it.
+ * This is comparable to a classical read(/dev/fuse).
+ */
+static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ring_ent)
+{
+	int err = 0;
+
+	err = fuse_uring_prepare_send(ring_ent);
+	if (err)
+		goto err;
+
+	io_uring_cmd_complete_in_task(ring_ent->cmd,
+				      fuse_uring_async_send_to_ring);
+	return 0;
+
+err:
+	return err;
+}
+
 /*
  * Put a ring request onto hold, it is no longer used for now.
  */
@@ -206,6 +430,166 @@ static void fuse_uring_ent_avail(struct fuse_ring_ent *ring_ent,
 	ring_ent->state = FRRS_WAIT;
 }
 
+/*
+ * Assign a fuse request to the given ring entry
+ */
+static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ring_ent,
+					   struct fuse_req *req)
+{
+	lockdep_assert_held(&ring_ent->queue->lock);
+
+	if (WARN_ON_ONCE(ring_ent->state != FRRS_WAIT &&
+			 ring_ent->state != FRRS_COMMIT)) {
+		pr_warn("%s qid=%d tag=%d state=%d async=%d\n", __func__,
+			ring_ent->queue->qid, ring_ent->tag, ring_ent->state,
+			ring_ent->async);
+	}
+	list_del_init(&req->list);
+	clear_bit(FR_PENDING, &req->flags);
+	ring_ent->fuse_req = req;
+	ring_ent->state = FRRS_FUSE_REQ;
+}
+
+/*
+ * Release the ring entry and fetch the next fuse request if available
+ *
+ * @return true if a new request has been fetched
+ */
+static bool fuse_uring_ent_assign_req(struct fuse_ring_ent *ring_ent)
+	__must_hold(&queue->lock)
+{
+	struct fuse_req *req = NULL;
+	struct fuse_ring_queue *queue = ring_ent->queue;
+	struct list_head *req_queue = ring_ent->async ?
+				      &queue->async_fuse_req_queue :
+				      &queue->sync_fuse_req_queue;
+
+	lockdep_assert_held(&queue->lock);
+
+	/* get and assign the next entry while still holding the lock */
+	if (!list_empty(req_queue)) {
+		req = list_first_entry(req_queue, struct fuse_req, list);
+		fuse_uring_add_req_to_ring_ent(ring_ent, req);
+		list_del_init(&ring_ent->list);
+	}
+
+	return req != NULL;
+}
+
+/*
+ * Read data from the ring buffer, which user space has written to.
+ * This is comparable to the handling of a classical write(/dev/fuse).
+ * Also make the ring request available again for new fuse requests.
+ */
+static void fuse_uring_commit(struct fuse_ring_ent *ring_ent,
+			      unsigned int issue_flags)
+{
+	struct fuse_ring *ring = ring_ent->queue->ring;
+	struct fuse_conn *fc = ring->fc;
+	struct fuse_ring_req __user *rreq = ring_ent->rreq;
+	struct fuse_req *req = ring_ent->fuse_req;
+	ssize_t err = 0;
+	bool set_err = false;
+
+	if (copy_from_user(&req->out.h, &rreq->out, sizeof(req->out.h))) {
+		err = -EFAULT;
+		req->out.h.error = err;
+		goto out;
+	}
+
+	err = fuse_uring_out_header_has_err(&req->out.h, req, fc);
+	if (err) {
+		/* req->out.h.error already set */
+		pr_devel("%s:%d err=%zd oh->err=%d\n", __func__, __LINE__, err,
+			 req->out.h.error);
+		goto out;
+	}
+
+	err = fuse_uring_copy_from_ring(ring, req, ring_ent);
+	if (err)
+		set_err = true;
+
+out:
+	pr_devel("%s:%d ret=%zd op=%d req-ret=%d\n", __func__, __LINE__, err,
+		 req->args->opcode, req->out.h.error);
+	fuse_uring_req_end(ring_ent, set_err, err);
+}
+
+/*
+ * Get the next fuse req and send it
+ */
+static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ring_ent,
+				     struct fuse_ring_queue *queue)
+{
+	int has_next, err;
+	int prev_state = ring_ent->state;
+
+	WARN_ON_ONCE(!list_empty(&ring_ent->list));
+
+	do {
+		spin_lock(&queue->lock);
+		has_next = fuse_uring_ent_assign_req(ring_ent);
+		if (!has_next) {
+			fuse_uring_ent_avail(ring_ent, queue);
+			spin_unlock(&queue->lock);
+			break; /* no request left */
+		}
+		spin_unlock(&queue->lock);
+
+		err = fuse_uring_send_next_to_ring(ring_ent);
+		if (err) {
+			ring_ent->state = prev_state;
+			continue;
+		}
+
+		err = 0;
+		spin_lock(&queue->lock);
+		ring_ent->state = FRRS_USERSPACE;
+		list_add(&ring_ent->list, &queue->ent_in_userspace);
+		spin_unlock(&queue->lock);
+	} while (err);
+}
+
+/* FUSE_URING_REQ_COMMIT_AND_FETCH handler */
+static int fuse_uring_commit_fetch(struct fuse_ring_ent *ring_ent,
+				   struct io_uring_cmd *cmd,
+				   unsigned int issue_flags)
+__releases(ring_ent->queue->lock)
+{
+	int err;
+	struct fuse_ring_queue *queue = ring_ent->queue;
+	struct fuse_ring *ring = queue->ring;
+
+	err = -ENOTCONN;
+	if (unlikely(!ring->ready)) {
+		pr_info("commit and fetch, but fuse-uring is not ready\n");
+		return err;
+	}
+
+	err = -EALREADY;
+	if (ring_ent->state != FRRS_USERSPACE) {
+		pr_info("qid=%d tag=%d state %d SQE already handled\n",
+			queue->qid, ring_ent->tag, ring_ent->state);
+		return err;
+	}
+
+	fuse_ring_ring_ent_unset_userspace(ring_ent);
+
+	ring_ent->cmd = cmd;
+	spin_unlock(&queue->lock);
+
+	/* runs without the queue lock, as other locks are taken in here */
+	fuse_uring_commit(ring_ent, issue_flags);
+
+	/*
+	 * Fetching the next request is absolutely required as queued
+	 * fuse requests would otherwise not get processed - committing
+	 * and fetching is done in one step vs legacy fuse, which has
+	 * separate read (fetch request) and write (commit result).
+	 */
+	fuse_uring_next_fuse_req(ring_ent, queue);
+	return 0;
+}
+
 /*
  * fuse_uring_req_fetch command handling
  */
@@ -250,6 +634,7 @@ __must_hold(ring_ent->queue->lock)
 	return 0;
 }
 
+/* FUSE_URING_REQ_FETCH handler */
 static int fuse_uring_fetch(struct fuse_ring_ent *ring_ent,
 			    struct io_uring_cmd *cmd, unsigned int issue_flags)
 	__releases(ring_ent->queue->lock)
@@ -339,10 +724,23 @@ int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 	if (unlikely(fc->aborted || queue->stopped))
 		goto err_unlock;
 
+	ring_ent->rreq = (void __user *)cmd_req->buf_ptr;
+	ring_ent->max_arg_len = cmd_req->buf_len -
+				offsetof(struct fuse_ring_req, in_out_arg);
+	ret = -EINVAL;
+	if (cmd_req->buf_len < ring->req_buf_sz) {
+		pr_info("Invalid req buf len, expected: %zd got %d\n",
+			ring->req_buf_sz, cmd_req->buf_len);
+		goto err_unlock;
+	}
+
 	switch (cmd_op) {
 	case FUSE_URING_REQ_FETCH:
 		ret = fuse_uring_fetch(ring_ent, cmd, issue_flags);
 		break;
+	case FUSE_URING_REQ_COMMIT_AND_FETCH:
+		ret = fuse_uring_commit_fetch(ring_ent, cmd, issue_flags);
+		break;
 	default:
 		ret = -EINVAL;
 		pr_devel("Unknown uring command %d", cmd_op);
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 6561f4178cac..697963e5d524 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -26,6 +26,9 @@ enum fuse_ring_req_state {
 	/* The ring request waits for a new fuse request */
 	FRRS_WAIT,
 
+	/* The ring req got assigned a fuse req */
+	FRRS_FUSE_REQ,
+
 	/* request is in or on the way to user space */
 	FRRS_USERSPACE,
 };
@@ -47,6 +50,17 @@ struct fuse_ring_ent {
 	struct list_head list;
 
 	struct io_uring_cmd *cmd;
+
+	/* fuse_req assigned to the ring entry */
+	struct fuse_req *fuse_req;
+
+	/*
+	 * buffer provided by the fuse server
+	 */
+	struct fuse_ring_req __user *rreq;
+
+	/* struct fuse_ring_req::in_out_arg size */
+	size_t max_arg_len;
 };
 
 struct fuse_ring_queue {
@@ -69,6 +83,13 @@ struct fuse_ring_queue {
 	struct list_head async_ent_avail_queue;
 	struct list_head sync_ent_avail_queue;
 
+	/* fuse fg/bg request types */
+	struct list_head async_fuse_req_queue;
+	struct list_head sync_fuse_req_queue;
+
+	/* entries sent to userspace */
+	struct list_head ent_in_userspace;
+
 	/*
 	 * available number of sync requests,
 	 * loosely bound to fuse foreground requests
-- 
2.43.0