[PATCH RFC v2 19/19] fuse: {uring} Optimize async sends

Avoid async completion via task work (i.e. context switches) when it is
not needed: when the current context provides valid issue_flags, the
io_uring command can be completed inline with io_uring_cmd_done().

Cc: io-uring@xxxxxxxxxxxxxxx
Signed-off-by: Bernd Schubert <bschubert@xxxxxxx>

---
The following condition should be verified by io-uring developers:

} else if (current->io_uring) {
    /* There are two cases here
     * 1) fuse-server side uses multiple threads accessing
     *    the ring
     * 2) IO requests through io-uring
     */
    send_in_task = true;
    issue_flags = 0;
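
For reference: the condition relies on queue->server_task and
queue->uring_cmd_issue_flags, which, per the comment in the diff, are
recorded for the first thread that submits FUSE_URING_REQ_FETCH. A
minimal sketch of that assumed setup follows; fuse_uring_note_server_ctx()
is a hypothetical helper name, not part of the patch:

static void fuse_uring_note_server_ctx(struct fuse_ring_queue *queue,
				       unsigned int issue_flags)
{
	spin_lock(&queue->lock);
	/* remember the first fetching thread and its issue_flags */
	if (!queue->server_task) {
		queue->server_task = current;
		queue->uring_cmd_issue_flags = issue_flags;
	}
	spin_unlock(&queue->lock);
}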
---
 fs/fuse/dev_uring.c | 57 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 11 deletions(-)

diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index cdc5836edb6e..74407e5e86fa 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -32,7 +32,8 @@
 #include <linux/io_uring/cmd.h>
 
 static void fuse_uring_req_end_and_get_next(struct fuse_ring_ent *ring_ent,
-					    bool set_err, int error);
+					    bool set_err, int error,
+					    unsigned int issue_flags);
 
 static void fuse_ring_ring_ent_unset_userspace(struct fuse_ring_ent *ent)
 {
@@ -682,7 +683,9 @@ static int fuse_uring_copy_to_ring(struct fuse_ring *ring, struct fuse_req *req,
  * userspace will read it
  * This is comparable with classical read(/dev/fuse)
  */
-static void fuse_uring_send_to_ring(struct fuse_ring_ent *ring_ent)
+static void fuse_uring_send_to_ring(struct fuse_ring_ent *ring_ent,
+				    unsigned int issue_flags,
+				    bool send_in_task)
 {
 	struct fuse_ring *ring = ring_ent->queue->ring;
 	struct fuse_ring_req *rreq = ring_ent->rreq;
@@ -723,13 +726,16 @@ static void fuse_uring_send_to_ring(struct fuse_ring_ent *ring_ent)
 		 __func__, ring_ent->queue->qid, ring_ent->tag, ring_ent->state,
 		 rreq->in.opcode, rreq->in.unique);
 
-	io_uring_cmd_complete_in_task(ring_ent->cmd,
-				      fuse_uring_async_send_to_ring);
+	if (send_in_task)
+		io_uring_cmd_complete_in_task(ring_ent->cmd,
+					      fuse_uring_async_send_to_ring);
+	else
+		io_uring_cmd_done(ring_ent->cmd, 0, 0, issue_flags);
 
 	return;
 
 err:
-	fuse_uring_req_end_and_get_next(ring_ent, true, err);
+	fuse_uring_req_end_and_get_next(ring_ent, true, err, issue_flags);
 }
 
 /*
@@ -806,7 +812,8 @@ static bool fuse_uring_ent_release_and_fetch(struct fuse_ring_ent *ring_ent)
  * has lock/unlock/lock to avoid holding the lock on calling fuse_request_end
  */
 static void fuse_uring_req_end_and_get_next(struct fuse_ring_ent *ring_ent,
-					    bool set_err, int error)
+					    bool set_err, int error,
+					    unsigned int issue_flags)
 {
 	struct fuse_req *req = ring_ent->fuse_req;
 	int has_next;
@@ -822,7 +829,7 @@ static void fuse_uring_req_end_and_get_next(struct fuse_ring_ent *ring_ent,
 	has_next = fuse_uring_ent_release_and_fetch(ring_ent);
 	if (has_next) {
 		/* called within uring context - use provided flags */
-		fuse_uring_send_to_ring(ring_ent);
+		fuse_uring_send_to_ring(ring_ent, issue_flags, false);
 	}
 }
 
@@ -857,7 +864,7 @@ static void fuse_uring_commit_and_release(struct fuse_dev *fud,
 out:
 	pr_devel("%s:%d ret=%zd op=%d req-ret=%d\n", __func__, __LINE__, err,
 		 req->args->opcode, req->out.h.error);
-	fuse_uring_req_end_and_get_next(ring_ent, set_err, err);
+	fuse_uring_req_end_and_get_next(ring_ent, set_err, err, issue_flags);
 }
 
 /*
@@ -1156,10 +1163,12 @@ int fuse_uring_queue_fuse_req(struct fuse_conn *fc, struct fuse_req *req)
 	struct fuse_ring_queue *queue;
 	struct fuse_ring_ent *ring_ent = NULL;
 	int res;
-	int async = test_bit(FR_BACKGROUND, &req->flags) &&
-		    !req->args->async_blocking;
+	int async_req = test_bit(FR_BACKGROUND, &req->flags);
+	int async = async_req && !req->args->async_blocking;
 	struct list_head *ent_queue, *req_queue;
 	int qid;
+	bool send_in_task = false;
+	unsigned int issue_flags;
 
 	qid = fuse_uring_get_req_qid(req, ring, async);
 	queue = fuse_uring_get_queue(ring, qid);
@@ -1182,11 +1191,37 @@ int fuse_uring_queue_fuse_req(struct fuse_conn *fc, struct fuse_req *req)
 			list_first_entry(ent_queue, struct fuse_ring_ent, list);
 		list_del(&ring_ent->list);
 		fuse_uring_add_req_to_ring_ent(ring_ent, req);
+		if (current == queue->server_task) {
+			issue_flags = queue->uring_cmd_issue_flags;
+		} else if (current->io_uring) {
+			/* There are two cases here:
+			 * 1) The fuse-server side uses multiple threads
+			 *    accessing the ring; issue_flags are stored in
+			 *    the queue only for one thread (the first one
+			 *    that submits FUSE_URING_REQ_FETCH).
+			 * 2) IO requests submitted through io-uring; we do
+			 *    not have issue_flags at all for these.
+			 */
+			send_in_task = true;
+			issue_flags = 0;
+		} else {
+			if (async_req) {
+				/*
+				 * Page cache writes might hold an
+				 * upper-layer spinlock, which conflicts
+				 * with the io-uring mutex.
+				 */
+				send_in_task = true;
+				issue_flags = 0;
+			} else {
+				issue_flags = IO_URING_F_UNLOCKED;
+			}
+		}
 	}
 	spin_unlock(&queue->lock);
 
 	if (ring_ent != NULL)
-		fuse_uring_send_to_ring(ring_ent);
+		fuse_uring_send_to_ring(ring_ent, issue_flags, send_in_task);
 
 	return 0;
 

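For reference: io_uring_cmd_complete_in_task() defers the completion into
task work, where io_uring hands valid issue_flags to the callback; that is
why the deferred path is safe when the caller's issue_flags are unknown,
at the cost of an extra context switch. A minimal sketch of such a
task-work callback (hypothetical name and body; the real
fuse_uring_async_send_to_ring() is defined earlier in the series):

static void fuse_uring_task_work_cb(struct io_uring_cmd *cmd,
				    unsigned int issue_flags)
{
	/* task-work context: io_uring provides valid issue_flags here */
	io_uring_cmd_done(cmd, 0, 0, issue_flags);
}
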
-- 
2.40.1
