We don't know what context we'll be called in for cancel; it could very well be with IRQs disabled already. Use the IRQ-saving variants of the locking primitives. Signed-off-by: Jens Axboe <axboe@xxxxxxxxx> --- diff --git a/fs/io-wq.c b/fs/io-wq.c index 3bbab2c58695..ba40a7ee31c3 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -642,19 +642,20 @@ static bool io_work_cancel(struct io_worker *worker, void *cancel_data) { struct io_cb_cancel_data *data = cancel_data; struct io_wqe *wqe = data->wqe; + unsigned long flags; bool ret = false; /* * Hold the lock to avoid ->cur_work going out of scope, caller * may deference the passed in work. */ - spin_lock_irq(&wqe->lock); + spin_lock_irqsave(&wqe->lock, flags); if (worker->cur_work && data->cancel(worker->cur_work, data->caller_data)) { send_sig(SIGINT, worker->task, 1); ret = true; } - spin_unlock_irq(&wqe->lock); + spin_unlock_irqrestore(&wqe->lock, flags); return ret; } @@ -669,9 +670,10 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe, .caller_data = cancel_data, }; struct io_wq_work *work; + unsigned long flags; bool found = false; - spin_lock_irq(&wqe->lock); + spin_lock_irqsave(&wqe->lock, flags); list_for_each_entry(work, &wqe->work_list, list) { if (cancel(work, cancel_data)) { list_del(&work->list); @@ -679,7 +681,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe, break; } } - spin_unlock_irq(&wqe->lock); + spin_unlock_irqrestore(&wqe->lock, flags); if (found) { work->flags |= IO_WQ_WORK_CANCEL; @@ -733,6 +735,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, struct io_wq_work *cwork) { struct io_wq_work *work; + unsigned long flags; bool found = false; cwork->flags |= IO_WQ_WORK_CANCEL; @@ -742,7 +745,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, * from there. CANCEL_OK means that the work is returned as-new, * no completion will be posted for it. 
*/ - spin_lock_irq(&wqe->lock); + spin_lock_irqsave(&wqe->lock, flags); list_for_each_entry(work, &wqe->work_list, list) { if (work == cwork) { list_del(&work->list); @@ -750,7 +753,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, break; } } - spin_unlock_irq(&wqe->lock); + spin_unlock_irqrestore(&wqe->lock, flags); if (found) { work->flags |= IO_WQ_WORK_CANCEL; -- Jens Axboe