On 1/7/25 16:54, Luis Henriques wrote:
[...]
@@ -785,10 +830,22 @@ static void fuse_uring_do_register(struct fuse_ring_ent *ring_ent,
unsigned int issue_flags)
{
struct fuse_ring_queue *queue = ring_ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
spin_lock(&queue->lock);
fuse_uring_ent_avail(ring_ent, queue);
spin_unlock(&queue->lock);
+
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(ring->ready, true);
+ fiq->ops = &fuse_io_uring_ops;
Shouldn't we be taking the fiq->lock to protect the above operation?
I switched the order and changed it to WRITE_ONCE. fiq->lock would
require that the code doing these operations also holds the lock.
Also see "[PATCH v9 16/17] fuse: block request allocation until",
there should be no races anymore.
+ }
+ }
}
/*
@@ -979,3 +1036,119 @@ int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to it - this has to be executed in ring task context.
+ */
+static void
+fuse_uring_send_req_in_task(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+
+ if (unlikely(issue_flags & IO_URING_F_TASK_DEAD)) {
+ err = -ECANCELED;
+ goto terminating;
+ }
+
+ err = fuse_uring_prepare_send(ent);
+ if (err)
+ goto err;
Suggestion: simplify this function flow. Something like:
int err = 0;
if (unlikely(issue_flags & IO_URING_F_TASK_DEAD))
err = -ECANCELED;
else if (fuse_uring_prepare_send(ent)) {
fuse_uring_next_fuse_req(ent, queue, issue_flags);
return;
}
spin_lock(&queue->lock);
[...]
That makes it look like a failure of fuse_uring_prepare_send is not an
error, but expected. How about like this?
static void
fuse_uring_send_req_in_task(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
struct fuse_ring_queue *queue = ent->queue;
int err;
if (!(issue_flags & IO_URING_F_TASK_DEAD)) {
err = fuse_uring_prepare_send(ent);
if (err) {
fuse_uring_next_fuse_req(ent, queue, issue_flags);
return;
}
} else {
err = -ECANCELED;
}
spin_lock(&queue->lock);
ent->state = FRRS_USERSPACE;
list_move(&ent->list, &queue->ent_in_userspace);
spin_unlock(&queue->lock);
io_uring_cmd_done(cmd, err, 0, issue_flags);
ent->cmd = NULL;
}
Thanks,
Bernd