Add each io_wq to a global list, and assign an id for each of them. This
is in preparation for attaching to an existing io_wq, rather than
creating a new one.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io-wq.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 54e270ae12ab..8cb0cff5f15c 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,9 +16,14 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/rculist_nulls.h>
+#include <linux/idr.h>
 
 #include "io-wq.h"
 
+static LIST_HEAD(wq_list);
+static DEFINE_MUTEX(wq_lock);
+static DEFINE_IDA(wq_ida);
+
 #define WORKER_IDLE_TIMEOUT	(5 * HZ)
 
 enum {
@@ -115,6 +120,8 @@ struct io_wq {
 	struct completion done;
 
 	refcount_t use_refs;
+	struct list_head wq_list;
+	unsigned int id;
 };
 
 static bool io_worker_get(struct io_worker *worker)
@@ -1019,7 +1026,7 @@ void io_wq_flush(struct io_wq *wq)
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
-	int ret = -ENOMEM, node;
+	int ret = -ENOMEM, node, id;
 	struct io_wq *wq;
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
@@ -1076,6 +1083,16 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		goto err;
 	}
 	refcount_set(&wq->use_refs, 1);
+
+	id = ida_simple_get(&wq_ida, 1, INT_MAX, GFP_KERNEL);
+	if (id < 0) {
+		ret = id;
+		goto err;
+	}
+	mutex_lock(&wq_lock);
+	wq->id = id;
+	list_add(&wq->wq_list, &wq_list);
+	mutex_unlock(&wq_lock);
 	reinit_completion(&wq->done);
 	return wq;
 }
@@ -1119,6 +1136,12 @@ static void __io_wq_destroy(struct io_wq *wq)
 
 void io_wq_destroy(struct io_wq *wq)
 {
-	if (refcount_dec_and_test(&wq->use_refs))
+	if (refcount_dec_and_test(&wq->use_refs)) {
+		mutex_lock(&wq_lock);
+		ida_simple_remove(&wq_ida, wq->id);
+		list_del(&wq->wq_list);
+		mutex_unlock(&wq_lock);
+
 		__io_wq_destroy(wq);
+	}
 }
-- 
2.25.0