Reduce the locking and unlocking of acct->lock across different functions
to make the code clearer.

Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx>
---
 fs/io-wq.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 1869cf6c39f3..26ccc04797b7 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -237,10 +237,14 @@ static void io_worker_exit(struct io_worker *worker)
 
 static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
 {
+	bool ret = false;
+
+	raw_spin_lock(&acct->lock);
 	if (!wq_list_empty(&acct->work_list) &&
 	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
-		return true;
-	return false;
+		ret = true;
+	raw_spin_unlock(&acct->lock);
+	return ret;
 }
 
 /*
@@ -385,13 +389,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
 	if (!atomic_dec_and_test(&acct->nr_running))
 		return;
 
-	raw_spin_lock(&acct->lock);
-	if (!io_acct_run_queue(acct)) {
-		raw_spin_unlock(&acct->lock);
+	if (!io_acct_run_queue(acct))
 		return;
-	}
 
-	raw_spin_unlock(&acct->lock);
 	atomic_inc(&acct->nr_running);
 	atomic_inc(&wqe->wq->worker_refs);
 	io_queue_worker_create(worker, acct, create_worker_cb);
@@ -540,6 +540,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 		 * can't make progress, any work completion or insertion will
 		 * clear the stalled flag.
 		 */
+		raw_spin_lock(&acct->lock);
 		work = io_get_next_work(acct, worker);
 		raw_spin_unlock(&acct->lock);
 		if (work) {
@@ -579,15 +580,10 @@ static void io_worker_handle_work(struct io_worker *worker)
 				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 				if (wq_has_sleeper(&wq->hash->wait))
 					wake_up(&wq->hash->wait);
-				raw_spin_lock(&acct->lock);
-				/* skip unnecessary unlock-lock wqe->lock */
 				if (!work)
 					goto get_next;
-				raw_spin_unlock(&acct->lock);
 			}
 		} while (work);
-
-		raw_spin_lock(&acct->lock);
 	} while (1);
 }
 
@@ -610,12 +606,9 @@ static int io_wqe_worker(void *data)
 
 		set_current_state(TASK_INTERRUPTIBLE);
 loop:
-		raw_spin_lock(&acct->lock);
 		if (io_acct_run_queue(acct)) {
 			io_worker_handle_work(worker);
 			goto loop;
-		} else {
-			raw_spin_unlock(&acct->lock);
 		}
 		raw_spin_lock(&wqe->lock);
 		/* timed out, exit unless we're the last worker */
@@ -641,10 +634,8 @@ static int io_wqe_worker(void *data)
 		last_timeout = !ret;
 	}
 
-	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-		raw_spin_lock(&acct->lock);
+	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 		io_worker_handle_work(worker);
-	}
 
 	io_worker_exit(worker);
 	return 0;
-- 
2.24.4
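
For readers less familiar with the pattern: the idea is to push the
lock/unlock pair into the predicate itself, so callers such as
io_wqe_dec_running() and io_wqe_worker() no longer have to bracket the
call. Below is a minimal standalone userspace sketch of that shape (not
part of the patch); the struct fields, function names and the pthread
mutex standing in for the kernel's raw spinlock are illustrative
assumptions only.

/*
 * Minimal userspace sketch of the locking shape used above; names and
 * fields are illustrative, and a pthread mutex stands in for the
 * kernel's raw spinlock.  Build with: cc sketch.c -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct acct {
	pthread_mutex_t lock;
	int work_count;		/* stand-in for acct->work_list */
	bool stalled;		/* stand-in for IO_ACCT_STALLED_BIT */
};

static struct acct acct = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.work_count = 1,
	.stalled = false,
};

/*
 * The predicate takes and drops the lock itself, as the patched
 * io_acct_run_queue() does.
 */
static bool acct_run_queue(struct acct *a)
{
	bool ret = false;

	pthread_mutex_lock(&a->lock);
	if (a->work_count > 0 && !a->stalled)
		ret = true;
	pthread_mutex_unlock(&a->lock);
	return ret;
}

/* Callers no longer wrap the check in lock/unlock pairs. */
static void worker_tick(struct acct *a)
{
	if (!acct_run_queue(a))
		return;
	printf("work available, handling it\n");
}

int main(void)
{
	worker_tick(&acct);
	return 0;
}

The benefit, as in the patch, is that every acquisition of the lock is
visibly paired with its release inside a single function.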