Hi Ming and Bart, Would you mind combining your solutions together? ;) It could be like this: blk_pre_runtime_suspend if (q->mq_ops) { if (!blk_mq_pm_queue_idle(q)) { ret = -EBUSY; pm_runtime_mark_last_busy(q->dev); } else { blk_set_preempt_only(q); synchronize_rcu(); if (!blk_mq_pm_queue_idle(q)) { blk_clear_preempt_only(q); ret = -EBUSY; } else { q->rpm_status = RPM_SUSPENDING; } } } else { spin_lock_irq(q->queue_lock); if (q->nr_pending) { ret = -EBUSY; pm_runtime_mark_last_busy(q->dev); } else { q->rpm_status = RPM_SUSPENDING; } spin_unlock_irq(q->queue_lock); } blk_queue_enter blk_resume_queue(q); wait_event(q->mq_freeze_wq, atomic_read(&q->mq_freeze_depth) == 0 || blk_queue_dying(q)); Thanks Jianchao On 08/08/2018 11:50 AM, Ming Lei wrote: > diff --git a/block/blk-core.c b/block/blk-core.c > index 26f9ceb85318..d1a5cd1da861 100644 > --- a/block/blk-core.c > +++ b/block/blk-core.c > @@ -3730,6 +3730,24 @@ void blk_pm_runtime_init(struct request_queue *q, struct device *dev) > } > EXPORT_SYMBOL(blk_pm_runtime_init); > > +static void blk_mq_pm_check_idle(struct blk_mq_hw_ctx *hctx, > + struct request *rq, void *priv, bool reserved) > +{ > + unsigned long *cnt = priv; > + > + (*cnt)++; > +} > + > +static bool blk_mq_pm_queue_idle(struct request_queue *q) > +{ > + unsigned long idle_cnt; > + > + idle_cnt = 0; > + blk_mq_queue_tag_busy_iter(q, blk_mq_pm_check_idle, &idle_cnt); > + > + return idle_cnt == 0; > +} > + > /** > * blk_pre_runtime_suspend - Pre runtime suspend check > * @q: the queue of the device > @@ -3754,13 +3772,18 @@ EXPORT_SYMBOL(blk_pm_runtime_init); > int blk_pre_runtime_suspend(struct request_queue *q) > { > int ret = 0; > + bool mq_idle = false; > > if (!q->dev) > return ret; > > mutex_lock(&q->pm_lock); > + > + if (q->mq_ops) > + mq_idle = blk_mq_pm_queue_idle(q); > + > spin_lock_irq(q->queue_lock); > - if (q->nr_pending) { > + if (q->nr_pending || !mq_idle) { > ret = -EBUSY; > pm_runtime_mark_last_busy(q->dev); > } else {