Let's change the worker state between free/running only when it's necessary. This can reduce some lock contention. Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx> --- fs/io-wq.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 097ea598bfe5..377c3e42a491 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -434,12 +434,11 @@ static void io_wqe_dec_running(struct io_worker *worker) */ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, struct io_wq_work *work) - __must_hold(wqe->lock) { - if (worker->flags & IO_WORKER_F_FREE) { - worker->flags &= ~IO_WORKER_F_FREE; - hlist_nulls_del_init_rcu(&worker->nulls_node); - } + raw_spin_lock(&wqe->lock); + worker->flags &= ~IO_WORKER_F_FREE; + hlist_nulls_del_init_rcu(&worker->nulls_node); + raw_spin_unlock(&wqe->lock); } /* @@ -587,13 +586,10 @@ static void io_worker_handle_work(struct io_worker *worker, struct io_wqe_acct * * clear the stalled flag. */ work = io_get_next_work(acct, worker, needs_lock); - if (work) { - raw_spin_lock(&wqe->lock); + if (work && (worker->flags & IO_WORKER_F_FREE)) __io_worker_busy(wqe, worker, work); - raw_spin_unlock(&wqe->lock); - } else { + else if (!work) break; - } io_assign_current_work(worker, work); __set_current_state(TASK_RUNNING); -- 2.24.4