[PATCH 4/8] io_uring/kbuf: simplify __io_put_kbuf

As a preparation step, remove the optimisation in __io_put_kbuf() that
tries to return the buffer to the ctx->io_buffers_cache list when the
ring is already locked. With that gone, __io_put_kbuf_list() is only
ever used with ->io_buffers_comp, so drop the explicit list argument.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
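For reference, this is the resulting put path once the two hunks below
are applied; it is assembled from the diff itself, trimmed to the
relevant lines, and is not a separate change:

	void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
	{
		/*
		 * Legacy provided buffers now always return to the
		 * ->io_buffers_comp list under ->completion_lock, even
		 * if ->uring_lock happens to be held at completion time.
		 */
		spin_lock(&req->ctx->completion_lock);
		__io_put_kbuf_list(req, len);
		spin_unlock(&req->ctx->completion_lock);
	}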
 io_uring/kbuf.c | 26 +++-----------------------
 io_uring/kbuf.h |  7 +++----
 2 files changed, 6 insertions(+), 27 deletions(-)

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index ea9fb3c124e56..eae6cf502b57f 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -70,29 +70,9 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 
 void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
 {
-	/*
-	 * We can add this buffer back to two lists:
-	 *
-	 * 1) The io_buffers_cache list. This one is protected by the
-	 *    ctx->uring_lock. If we already hold this lock, add back to this
-	 *    list as we can grab it from issue as well.
-	 * 2) The io_buffers_comp list. This one is protected by the
-	 *    ctx->completion_lock.
-	 *
-	 * We migrate buffers from the comp_list to the issue cache list
-	 * when we need one.
-	 */
-	if (issue_flags & IO_URING_F_UNLOCKED) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		spin_lock(&ctx->completion_lock);
-		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
-		spin_unlock(&ctx->completion_lock);
-	} else {
-		lockdep_assert_held(&req->ctx->uring_lock);
-
-		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
-	}
+	spin_lock(&req->ctx->completion_lock);
+	__io_put_kbuf_list(req, len);
+	spin_unlock(&req->ctx->completion_lock);
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 310f94a0727a6..1f28770648298 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -160,14 +160,13 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
 	return ret;
 }
 
-static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
-				      struct list_head *list)
+static inline void __io_put_kbuf_list(struct io_kiocb *req, int len)
 {
 	if (req->flags & REQ_F_BUFFER_RING) {
 		__io_put_kbuf_ring(req, len, 1);
 	} else {
 		req->buf_index = req->kbuf->bgid;
-		list_add(&req->kbuf->list, list);
+		list_add(&req->kbuf->list, &req->ctx->io_buffers_comp);
 		req->flags &= ~REQ_F_BUFFER_SELECTED;
 	}
 }
@@ -179,7 +178,7 @@ static inline void io_kbuf_drop(struct io_kiocb *req)
 
 	spin_lock(&req->ctx->completion_lock);
 	/* len == 0 is fine here, non-ring will always drop all of it */
-	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
+	__io_put_kbuf_list(req, 0);
 	spin_unlock(&req->ctx->completion_lock);
 }
 
-- 
2.47.1




