[PATCH 03/19] io_uring: make worker pool per ctx for uringlet mode

From: Hao Xu <howeyxu@xxxxxxxxxxx>

For uringlet mode, make the worker pool per ctx. This is much simpler
to implement; we can improve it later if necessary. In uringlet mode a
worker needs to find its specific ctx, so add a private member to
struct io_wq for this. We set wq->task to NULL for uringlet as a mark
that this is a uringlet io-wq, so io_wq_create() must tolerate a NULL
data->task.

Signed-off-by: Hao Xu <howeyxu@xxxxxxxxxxx>
---
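Note (illustration only, not part of the patch): a uringlet worker can
recover its owning ctx from wq->private, which io_init_wq_offload()
sets to the io_ring_ctx when IORING_SETUP_URINGLET is used. A minimal
sketch of such a lookup, with io_wq_get_ctx() as a hypothetical helper
name that would sit next to io_wq_is_uringlet() in io-wq.c:

/*
 * Hypothetical helper (name assumed): return the io_ring_ctx that owns
 * a uringlet io-wq.  wq->private is set to the ctx by
 * io_init_wq_offload() for IORING_SETUP_URINGLET rings and left NULL
 * for normal io-wqs, so callers should check io_wq_is_uringlet() first.
 */
static inline struct io_ring_ctx *io_wq_get_ctx(struct io_wq *wq)
{
	return wq->private;
}
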
 include/linux/io_uring_types.h |  1 +
 io_uring/io-wq.c               | 11 ++++++++++-
 io_uring/io-wq.h               |  4 ++++
 io_uring/io_uring.c            |  9 +++++++++
 io_uring/tctx.c                |  8 +++++++-
 5 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 677a25d44d7f..c8093e733a35 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -356,6 +356,7 @@ struct io_ring_ctx {
 	unsigned			sq_thread_idle;
 	/* protected by ->completion_lock */
 	unsigned			evfd_last_cq_tail;
+	struct io_wq			*let;
 };
 
 enum {
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index f631acbd50df..aaa58cbacf60 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -127,6 +127,8 @@ struct io_wq {
 
 	struct task_struct *task;
 
+	void *private;
+
 	struct io_wqe *wqes[];
 };
 
@@ -392,6 +394,11 @@ static bool io_queue_worker_create(struct io_worker *worker,
 	return false;
 }
 
+static inline bool io_wq_is_uringlet(struct io_wq *wq)
+{
+	return wq->private;
+}
+
 static void io_wqe_dec_running(struct io_worker *worker)
 {
 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
@@ -1153,6 +1160,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	wq->hash = data->hash;
 	wq->free_work = data->free_work;
 	wq->do_work = data->do_work;
+	wq->private = data->private;
 
 	ret = -ENOMEM;
 	for_each_node(node) {
@@ -1188,7 +1196,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		INIT_LIST_HEAD(&wqe->all_list);
 	}
 
-	wq->task = get_task_struct(data->task);
+	if (data->task)
+		wq->task = get_task_struct(data->task);
 	atomic_set(&wq->worker_refs, 1);
 	init_completion(&wq->worker_done);
 	return wq;
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..b9f5ce4493e0 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -41,6 +41,7 @@ struct io_wq_data {
 	struct task_struct *task;
 	io_wq_work_fn *do_work;
 	free_work_fn *free_work;
+	void *private;
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
@@ -80,4 +81,7 @@ static inline bool io_wq_current_is_worker(void)
 	return in_task() && (current->flags & PF_IO_WORKER) &&
 		current->worker_private;
 }
+
+extern struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+					struct task_struct *task);
 #endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5e4f5b1684dd..cb011a04653b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3318,6 +3318,15 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ret = io_sq_offload_create(ctx, p);
 	if (ret)
 		goto err;
+
+	if (ctx->flags & IORING_SETUP_URINGLET) {
+		ctx->let = io_init_wq_offload(ctx, current);
+		if (IS_ERR(ctx->let)) {
+			ret = PTR_ERR(ctx->let);
+			goto err;
+		}
+	}
+
 	/* always set a rsrc node */
 	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 7f97d97fef0a..09c91cd7b5bf 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -12,7 +12,7 @@
 #include "io_uring.h"
 #include "tctx.h"
 
-static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 					struct task_struct *task)
 {
 	struct io_wq_hash *hash;
@@ -34,9 +34,15 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 	mutex_unlock(&ctx->uring_lock);
 
 	data.hash = hash;
+	/* for uringlet, wq->task is the iouring instance creator */
 	data.task = task;
 	data.free_work = io_wq_free_work;
 	data.do_work = io_wq_submit_work;
+	/* distinguish normal iowq and uringlet by wq->private for now */
+	if (ctx->flags & IORING_SETUP_URINGLET)
+		data.private = ctx;
+	else
+		data.private = NULL;
 
 	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-- 
2.25.1



