[PATCH 2/2] io_uring: fix linked fixed !iter rw

As loop_rw_iter() may need the user's mm even for fixed-buffer requests
(when the file has no ->read_iter()/->write_iter(), it falls back to
->read()/->write() with a plain user pointer), replace io_sqe_needs_user()
with io_req_needs_user(), which accounts for that case, so the offloading
thread and io-wq grab the mm for such requests as well. Since the new
check dereferences req->file, move io_req_set_file() from io_submit_sqe()
into io_submit_sqes(), ahead of the mm check. loop_rw_iter() now also
tests for a bvec iterator explicitly before using the saved user address.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)
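
For reviewers, the decision the new io_req_needs_user() makes, as a
minimal standalone sketch. This is a userspace approximation for
illustration only; struct file_ops, enum op and req_needs_user() are
made-up stand-ins, not kernel types:

#include <stdbool.h>

/* invented stand-in for the two struct file_operations methods we test */
struct file_ops {
	void *read_iter;	/* non-NULL: file can consume an iov_iter */
	void *write_iter;
};

enum op { OP_READ_FIXED, OP_WRITE_FIXED, OP_OTHER };

/*
 * Fixed (registered-buffer) requests normally avoid the submitter's mm:
 * the buffer pages are pinned at registration time and handed to
 * ->read_iter()/->write_iter() as a bvec.  If the file lacks the iter
 * methods, loop_rw_iter() instead calls ->read()/->write() with a plain
 * user pointer, which is only valid under the submitting task's mm.
 */
static bool req_needs_user(enum op opcode, const struct file_ops *f)
{
	if (opcode == OP_READ_FIXED && f->read_iter)
		return false;
	if (opcode == OP_WRITE_FIXED && f->write_iter)
		return false;
	return true;	/* take over the submitter's mm before issuing */
}

When this returns true, the two call sites in the patch react
accordingly: io_submit_sqes() does mmget_not_zero()/use_mm() on
ctx->sqo_mm before issuing inline, and io_prep_async_work() sets
IO_WQ_WORK_NEEDS_USER so the io-wq worker acquires the mm itself.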

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 566e987c6dab..d84b69872967 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -525,12 +525,13 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static inline bool io_req_needs_user(struct io_kiocb *req)
 {
-	u8 opcode = READ_ONCE(sqe->opcode);
+	struct file *f = req->file;
+	u8 opcode = READ_ONCE(req->submit.sqe->opcode);
 
-	return !(opcode == IORING_OP_READ_FIXED ||
-		 opcode == IORING_OP_WRITE_FIXED);
+	return !((opcode == IORING_OP_READ_FIXED && f->f_op->read_iter) ||
+		(opcode == IORING_OP_WRITE_FIXED && f->f_op->write_iter));
 }
 
 static inline bool io_prep_async_work(struct io_kiocb *req,
@@ -559,7 +560,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
 				req->work.flags |= IO_WQ_WORK_UNBOUND;
 			break;
 		}
-		if (io_sqe_needs_user(req->submit.sqe))
+		if (io_req_needs_user(req))
 			req->work.flags |= IO_WQ_WORK_NEEDS_USER;
 	}
 
@@ -1625,11 +1626,11 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 		struct iovec iovec;
 		ssize_t nr;
 
-		if (iter_is_iovec(&iter->it)) {
-			iovec = iov_iter_iovec(&iter->it);
-		} else {
+		if (iov_iter_is_bvec(&iter->it)) {
 			iovec.iov_base = (void __user *)iter->ubuf;
 			iovec.iov_len = iov_iter_count(&iter->it);
+		} else {
+			iovec = iov_iter_iovec(&iter->it);
 		}
 
 		if (rw == READ) {
@@ -3041,14 +3042,6 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 		goto err_req;
 	}
 
-	ret = io_req_set_file(state, req);
-	if (unlikely(ret)) {
-err_req:
-		io_cqring_add_event(req, ret);
-		io_double_put_req(req);
-		return;
-	}
-
 	/*
 	 * If we already have a head request, queue this one for async
 	 * submittal once the head completes. If we don't have a head but
@@ -3092,6 +3085,11 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 	} else {
 		io_queue_sqe(req);
 	}
+
+	return;
+err_req:
+	io_cqring_add_event(req, ret);
+	io_double_put_req(req);
 }
 
 /*
@@ -3197,6 +3195,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	for (i = 0; i < nr; i++) {
 		struct io_kiocb *req;
 		unsigned int sqe_flags;
+		int ret;
 
 		req = io_get_req(ctx, statep);
 		if (unlikely(!req)) {
@@ -3209,7 +3208,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 
-		if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
+		ret = io_req_set_file(statep, req);
+		if (unlikely(ret)) {
+			io_cqring_add_event(req, ret);
+			__io_free_req(req);
+			break;
+		}
+
+		if (io_req_needs_user(req) && !*mm) {
 			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
 			if (!mm_fault) {
 				use_mm(ctx->sqo_mm);
@@ -3217,6 +3223,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			}
 		}
 
+
 		sqe_flags = req->submit.sqe->flags;
 
 		req->submit.ring_file = ring_file;
-- 
2.24.0