[PATCH 1/2] io_uring: restructure io_{read,write} control flow

Call io_async_list_note at the end if -EAGAIN is going to be returned;
we need iov_count for that, and it is available at (almost) the same
point where we need to free iovec.

Instead of using a second return value, reset the normal one to 0 after
passing it to io_rw_done.

Unless rw_verify_area returns -EAGAIN, this shouldn't result in
different behavior.

This change should make it easier to punt a request to the workers by
returning -EAGAIN and still calling io_async_list_note if needed.
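For reference, a simplified userspace sketch of the restructured flow
(the *_stub helpers and sketch_io_read below are stand-ins invented for
illustration, not the actual kernel functions; only the control flow is
meant to mirror the patch):

/*
 * Sketch of the new io_read()-style control flow: single out_free exit,
 * io_async_list_note-equivalent called there when -EAGAIN is returned,
 * and ret reset to 0 once the completion path has consumed it.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int import_iovec_stub(void **iovec) { *iovec = malloc(16); return 0; }
static int verify_area_stub(size_t iov_count) { (void)iov_count; return 0; }
static ssize_t read_iter_stub(bool force_nonblock)
{
	/* pretend the lower layer cannot make progress without blocking */
	return force_nonblock ? -EAGAIN : 42;
}
static void rw_done_stub(ssize_t res) { printf("completed, res=%zd\n", res); }
static void async_list_note_stub(size_t iov_count)
{
	printf("punt to worker, iov_count=%zu\n", iov_count);
}

static ssize_t sketch_io_read(bool force_nonblock, bool needs_lock)
{
	void *iovec = NULL;
	size_t iov_count = 4096;	/* would come from iov_iter_count() */
	ssize_t ret;

	ret = import_iovec_stub(&iovec);
	if (ret)
		return ret;

	ret = verify_area_stub(iov_count);
	if (ret)
		goto out_free;

	/* Pass through -EAGAIN for forced non-blocking submission */
	ret = read_iter_stub(force_nonblock);
	if (!(force_nonblock && ret == -EAGAIN)) {
		rw_done_stub(ret);
		ret = 0;	/* completion consumed the result */
	}

out_free:
	/* single exit: note the request size only when punting async */
	if (ret == -EAGAIN && !needs_lock)
		async_list_note_stub(iov_count);
	free(iovec);
	return ret;
}

int main(void)
{
	printf("nonblock: ret=%zd\n", sketch_io_read(true, false));
	printf("blocking: ret=%zd\n", sketch_io_read(false, false));
	return 0;
}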

Signed-off-by: Stefan Bühler <source@xxxxxxxxxxxx>
---
 fs/io_uring.c | 89 ++++++++++++++++++++++-----------------------------
 1 file changed, 39 insertions(+), 50 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 84efb8956734..52e435a72b6f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1062,26 +1062,24 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
 	if (ret)
 		return ret;
-
 	iov_count = iov_iter_count(&iter);
+
 	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		ssize_t ret2;
+	if (ret)
+		goto out_free;
 
-		/* Catch -EAGAIN return for forced non-blocking submission */
-		ret2 = call_read_iter(file, kiocb, &iter);
-		if (!force_nonblock || ret2 != -EAGAIN) {
-			io_rw_done(kiocb, ret2);
-		} else {
-			/*
-			 * If ->needs_lock is true, we're already in async
-			 * context.
-			 */
-			if (!s->needs_lock)
-				io_async_list_note(READ, req, iov_count);
-			ret = -EAGAIN;
-		}
+	/* Passthrough -EAGAIN return for forced non-blocking submission */
+	ret = call_read_iter(file, kiocb, &iter);
+	if (!(force_nonblock && ret == -EAGAIN)) {
+		io_rw_done(kiocb, ret);
+		ret = 0;
 	}
+
+out_free:
+	/* If ->needs_lock is true, we're already in async context. */
+	if (ret == -EAGAIN && !s->needs_lock)
+		io_async_list_note(READ, req, iov_count);
+
 	kfree(iovec);
 	return ret;
 }
@@ -1109,50 +1107,41 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
 	if (ret)
 		return ret;
-
 	iov_count = iov_iter_count(&iter);
 
 	ret = -EAGAIN;
-	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
-		/* If ->needs_lock is true, we're already in async context. */
-		if (!s->needs_lock)
-			io_async_list_note(WRITE, req, iov_count);
+	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
 		goto out_free;
-	}
 
 	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
-	if (!ret) {
-		ssize_t ret2;
+	if (ret)
+		goto out_free;
 
-		/*
-		 * Open-code file_start_write here to grab freeze protection,
-		 * which will be released by another thread in
-		 * io_complete_rw().  Fool lockdep by telling it the lock got
-		 * released so that it doesn't complain about the held lock when
-		 * we return to userspace.
-		 */
-		if (S_ISREG(file_inode(file)->i_mode)) {
-			__sb_start_write(file_inode(file)->i_sb,
-						SB_FREEZE_WRITE, true);
-			__sb_writers_release(file_inode(file)->i_sb,
-						SB_FREEZE_WRITE);
-		}
-		kiocb->ki_flags |= IOCB_WRITE;
+	/*
+	 * Open-code file_start_write here to grab freeze protection,
+	 * which will be released by another thread in
+	 * io_complete_rw().  Fool lockdep by telling it the lock got
+	 * released so that it doesn't complain about the held lock when
+	 * we return to userspace.
+	 */
+	if (S_ISREG(file_inode(file)->i_mode)) {
+		__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+		__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+	}
+	kiocb->ki_flags |= IOCB_WRITE;
 
-		ret2 = call_write_iter(file, kiocb, &iter);
-		if (!force_nonblock || ret2 != -EAGAIN) {
-			io_rw_done(kiocb, ret2);
-		} else {
-			/*
-			 * If ->needs_lock is true, we're already in async
-			 * context.
-			 */
-			if (!s->needs_lock)
-				io_async_list_note(WRITE, req, iov_count);
-			ret = -EAGAIN;
-		}
+	/* Passthrough -EAGAIN return for forced non-blocking submission */
+	ret = call_write_iter(file, kiocb, &iter);
+	if (!(force_nonblock && ret == -EAGAIN)) {
+		io_rw_done(kiocb, ret);
+		ret = 0;
 	}
+
 out_free:
+	/* If ->needs_lock is true, we're already in async context. */
+	if (ret == -EAGAIN && !s->needs_lock)
+		io_async_list_note(WRITE, req, iov_count);
+
 	kfree(iovec);
 	return ret;
 }
-- 
2.20.1