Re: [PATCH 3/3] io_uring: support buffer selection

On 2/24/20 8:57 AM, Jens Axboe wrote:
>> ... and call io_send_recv_buffer_select() from here without holding
>> any extra locks in this function. This function is then called from
>> io_issue_sqe() (no extra locks held there either), which is called
>> from __io_queue_sqe() (no extra locks AFAICS), which is called from
>> places like task work.
>>
>> Am I missing something?
> 
> Submission is all done under the ctx->uring_lock mutex, though the async
> offload path does not hold it. So for the normal path we should all be
> fine, as we'll be serialized for that. We just need to ensure we do the
> buffer select when we prep the request for async execution, so the
> workers never have to do it. Either that, or ensure the workers grab the
> mutex before doing the buffer select (less ideal).
> 
>> It might in general be helpful to have more lockdep assertions in this
>> code, both to help the reader understand the locking rules and to help
>> verify locking correctness.
> 
> Agree, that'd be a good addition.

Tried to cater to both; here's an incremental that ensures we also grab
the uring_lock mutex for the async offload case, and adds lockdep
annotations to document and verify the locking rule.
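
In broad strokes, the incremental boils down to the pattern sketched
below (an illustrative sketch only, not the actual patch; lookup_buffer()
is a hypothetical stand-in for the idr lookup):

static struct io_buffer *buffer_select_sketch(struct io_ring_ctx *ctx,
					      int gid, bool needs_lock)
{
	struct io_buffer *kbuf;

	/*
	 * Inline submission and SQPOLL offload already hold
	 * ctx->uring_lock, so only the async worker path needs to take
	 * it here.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);

	/* Holds for every caller, whether locked above or by the submitter */
	lockdep_assert_held(&ctx->uring_lock);

	kbuf = lookup_buffer(ctx, gid);		/* hypothetical helper */

	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);

	return kbuf;
}

The provide-buffer side follows the same rule, keyed off !force_nonblock
at the call site.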


diff --git a/fs/io_uring.c b/fs/io_uring.c
index ba8d4e2d9f99..792ef01a521c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2173,7 +2173,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 }
 
 static struct io_buffer *io_buffer_select(struct io_kiocb *req, int gid,
-					  void *buf)
+					  void *buf, bool needs_lock)
 {
 	struct list_head *list;
 	struct io_buffer *kbuf;
@@ -2181,17 +2181,34 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, int gid,
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return buf;
 
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread; grab the lock for that case.
+	 */
+	if (needs_lock)
+		mutex_lock(&req->ctx->uring_lock);
+
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	list = idr_find(&req->ctx->io_buffer_idr, gid);
-	if (!list || list_empty(list))
-		return ERR_PTR(-ENOBUFS);
+	if (list && !list_empty(list)) {
+		kbuf = list_first_entry(list, struct io_buffer, list);
+		list_del(&kbuf->list);
+	} else {
+		kbuf = ERR_PTR(-ENOBUFS);
+	}
+
+	if (needs_lock)
+		mutex_unlock(&req->ctx->uring_lock);
 
-	kbuf = list_first_entry(list, struct io_buffer, list);
-	list_del(&kbuf->list);
 	return kbuf;
 }
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
-			       struct iovec **iovec, struct iov_iter *iter)
+			       struct iovec **iovec, struct iov_iter *iter,
+			       bool needs_lock)
 {
 	void __user *buf = u64_to_user_ptr(req->rw.addr);
 	size_t sqe_len = req->rw.len;
@@ -2215,7 +2232,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			int gid;
 
 			gid = (int) (unsigned long) req->rw.kiocb.private;
-			kbuf = io_buffer_select(req, gid, buf);
+			kbuf = io_buffer_select(req, gid, buf, needs_lock);
 			if (IS_ERR(kbuf)) {
 				*iovec = NULL;
 				return PTR_ERR(kbuf);
@@ -2369,7 +2386,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	io = req->io;
 	io->rw.iov = io->rw.fast_iov;
 	req->io = NULL;
-	ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+	ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
 	req->io = io;
 	if (ret < 0)
 		return ret;
@@ -2387,7 +2404,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t io_size, ret;
 
-	ret = io_import_iovec(READ, req, &iovec, &iter);
+	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 
@@ -2459,7 +2476,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	io = req->io;
 	io->rw.iov = io->rw.fast_iov;
 	req->io = NULL;
-	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
 	req->io = io;
 	if (ret < 0)
 		return ret;
@@ -2477,7 +2494,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 	size_t iov_count;
 	ssize_t ret, io_size;
 
-	ret = io_import_iovec(WRITE, req, &iovec, &iter);
+	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 
@@ -2910,7 +2927,8 @@ static int io_provide_buffer_prep(struct io_kiocb *req,
 	return 0;
 }
 
-static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
+static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt,
+			     bool force_nonblock)
 {
 	struct io_provide_buf *p = &req->pbuf;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2918,6 +2936,17 @@ static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
 	struct io_buffer *buf;
 	int ret = 0;
 
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread; grab the lock for that case.
+	 */
+	if (!force_nonblock)
+		mutex_lock(&ctx->uring_lock);
+
+	lockdep_assert_held(&ctx->uring_lock);
+
 	list = idr_find(&ctx->io_buffer_idr, p->gid);
 	if (!list) {
 		list = kmalloc(sizeof(*list), GFP_KERNEL);
@@ -2950,6 +2979,8 @@ static int io_provide_buffer(struct io_kiocb *req, struct io_kiocb **nxt)
 	list_add(&buf->list, list);
 	ret = buf->bid;
 out:
+	if (!force_nonblock)
+		mutex_unlock(&ctx->uring_lock);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
@@ -3387,14 +3418,15 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 
 static struct io_buffer *io_send_recv_buffer_select(struct io_kiocb *req,
 						    struct io_buffer **kbuf,
-						    int *cflags)
+						    int *cflags,
+						    bool needs_lock)
 {
 	struct io_sr_msg *sr = &req->sr_msg;
 
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
 		return req->sr_msg.buf;
 
-	*kbuf = io_buffer_select(req, sr->gid, sr->kbuf);
+	*kbuf = io_buffer_select(req, sr->gid, sr->kbuf, needs_lock);
 	if (IS_ERR(*kbuf))
 		return *kbuf;
 
@@ -3427,7 +3459,8 @@ static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
 		void __user *buf;
 		unsigned flags;
 
-		buf = io_send_recv_buffer_select(req, &kbuf, &cflags);
+		buf = io_send_recv_buffer_select(req, &kbuf, &cflags,
+							!force_nonblock);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto out;
@@ -3592,7 +3625,8 @@ static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
 		void __user *buf;
 		unsigned flags;
 
-		buf = io_send_recv_buffer_select(req, &kbuf, &cflags);
+		buf = io_send_recv_buffer_select(req, &kbuf, &cflags,
+							!force_nonblock);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto out;
@@ -4903,7 +4937,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret)
 				break;
 		}
-		ret = io_provide_buffer(req, nxt);
+		ret = io_provide_buffer(req, nxt, force_nonblock);
 		break;
 	default:
 		ret = -EINVAL;

-- 
Jens Axboe



