[RFC 1/2] io_uring: create io_queue_async() function

This patch pulls the async-punt code out of __io_queue_sqe() and into a new
io_queue_async() function.  It doesn't change runtime behavior, but it's a
bit cleaner and will be used in future patches.
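
For reviewers, a rough sketch (illustration only, not part of this patch)
of how a future caller could use the new helper.  The function name
io_example_punt() is hypothetical; the only error io_queue_async() can
return is the -ENOMEM from the sqe copy, in which case the caller is
expected to fall back to its synchronous submission path.

	/*
	 * Hypothetical future call site, for illustration only.  On
	 * success the async worker owns the submit reference; a non-zero
	 * return means the sqe copy failed (-ENOMEM) and the caller
	 * should complete the request synchronously instead.
	 */
	static int io_example_punt(struct io_ring_ctx *ctx,
				   struct io_kiocb *req,
				   struct sqe_submit *s)
	{
		int ret = io_queue_async(ctx, req, s);

		if (ret)
			return ret;	/* fall back to the sync path */
		return 0;
	}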

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@xxxxxxxxxx>
Reviewed-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
---
 fs/io_uring.c | 59 +++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 24 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5415fcc..acb213c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -362,6 +362,8 @@ struct io_submit_state {
 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
 				 long res);
 static void __io_free_req(struct io_kiocb *req);
+static int io_queue_async(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s);
 
 static struct kmem_cache *req_cachep;
 
@@ -2437,6 +2439,35 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 	return 0;
 }
 
+static int io_queue_async(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s)
+{
+	struct io_uring_sqe *sqe_copy;
+	struct async_list *list;
+
+	/* async context always uses a copy of the sqe */
+	sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+	if (!sqe_copy)
+		return -ENOMEM;
+
+	s->sqe = sqe_copy;
+
+	memcpy(&req->submit, s, sizeof(*s));
+	list = io_async_list_from_sqe(ctx, s->sqe);
+	if (!io_add_to_prev_work(list, req)) {
+		if (list)
+			atomic_inc(&list->cnt);
+		INIT_WORK(&req->work, io_sq_wq_submit_work);
+		io_queue_async_work(ctx, req);
+	}
+
+	/*
+	 * Queued up for async execution, worker will release
+	 * submit reference when the iocb is actually submitted.
+	 */
+	return 0;
+}
+
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			struct sqe_submit *s, bool force_nonblock)
 {
@@ -2448,30 +2479,10 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
 	 */
-	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
-	    (req->flags & REQ_F_MUST_PUNT))) {
-		struct io_uring_sqe *sqe_copy;
-
-		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
-		if (sqe_copy) {
-			struct async_list *list;
-
-			s->sqe = sqe_copy;
-			memcpy(&req->submit, s, sizeof(*s));
-			list = io_async_list_from_sqe(ctx, s->sqe);
-			if (!io_add_to_prev_work(list, req)) {
-				if (list)
-					atomic_inc(&list->cnt);
-				INIT_WORK(&req->work, io_sq_wq_submit_work);
-				io_queue_async_work(ctx, req);
-			}
-
-			/*
-			 * Queued up for async execution, worker will release
-			 * submit reference when the iocb is actually submitted.
-			 */
-			return 0;
-		}
+	if (ret == -EAGAIN &&
+	    (!(req->flags & REQ_F_NOWAIT) || (req->flags & REQ_F_MUST_PUNT)) &&
+	    !io_queue_async(ctx, req, s)) {
+		return 0;
 	}
 
 	/* drop submission reference */
-- 
1.8.3.1