Hi Bart

On 07/26/2018 06:26 AM, Bart Van Assche wrote:
> @@ -102,9 +109,11 @@ int blk_pre_runtime_suspend(struct request_queue *q)
>  		return ret;
>  
>  	blk_pm_runtime_lock(q);
> +	blk_set_preempt_only(q);

We only stop non-RQF_PM requests from entering the queue while it is in
RPM_SUSPENDING or RPM_RESUMING. blk_pre_runtime_suspend() should only
_check_ whether runtime suspend is allowed, so we should not set
preempt_only here. (See the sketch at the end of this mail for the kind
of gating I mean.)

> +	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
>  
>  	spin_lock_irq(q->queue_lock);
> -	if (q->nr_pending) {
> +	if (!percpu_ref_is_zero(&q->q_usage_counter)) {
>  		ret = -EBUSY;
>  		pm_runtime_mark_last_busy(q->dev);
>  	} else {
> @@ -112,6 +121,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
>  	}
>  	spin_unlock_irq(q->queue_lock);
>  
> +	percpu_ref_switch_to_percpu(&q->q_usage_counter);
>  	blk_pm_runtime_unlock(q);
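
To make the point concrete, the gating I have in mind is roughly what the
legacy request path already does in blk_pm_allow_request() in blk-core.c.
This is only a sketch from memory, not part of your patch, and the details
may differ in your tree: non-RQF_PM requests are held back only while
rpm_status is RPM_SUSPENDING/RPM_RESUMING (or RPM_SUSPENDED), and nothing
is gated while the queue is RPM_ACTIVE:

	static bool blk_pm_allow_request(struct request *rq)
	{
		switch (rq->q->rpm_status) {
		case RPM_RESUMING:
		case RPM_SUSPENDING:
			/* Only power management (RQF_PM) requests may pass. */
			return rq->rq_flags & RQF_PM;
		case RPM_SUSPENDED:
			/* No new requests while fully suspended. */
			return false;
		default:
			/* RPM_ACTIVE: nothing is gated. */
			return true;
		}
	}

In other words, the gating is driven by the rpm_status transitions, so it
does not seem necessary to flip preempt_only before we have even checked
whether the queue is idle.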