Drivers that have shared tagsets may need to quiesce a potentially large
number of request queues that all share a single tagset (e.g. nvme). Add
an interface to quiesce all the queues on a given tagset. This interface
is useful because it can speed up the quiesce by doing it in parallel.

For tagsets that have BLK_MQ_F_BLOCKING set, we kill each request queue's
dispatch percpu-refcount and then wait for every counter to become zero.

For tagsets that don't have BLK_MQ_F_BLOCKING set, a single call to
synchronize_rcu() is sufficient.

This patch is against Sagi's original post.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Cc: Bart Van Assche <bvanassche@xxxxxxx>
Cc: Johannes Thumshirn <Johannes.Thumshirn@xxxxxxx>
Cc: Chao Leng <lengchao@xxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
---
 block/blk-mq.c         | 59 +++++++++++++++++++++++++++++++++++-------
 include/linux/blk-mq.h |  2 ++
 2 files changed, 51 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index df0fa3640649..ccb500e38008 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -214,16 +214,7 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
-/**
- * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
- * @q: request queue.
- *
- * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Once this function is returned, we make
- * sure no dispatch can happen until the queue is unquiesced via
- * blk_mq_unquiesce_queue().
- */
-void blk_mq_quiesce_queue(struct request_queue *q)
+static void __blk_mq_quiesce_queue(struct request_queue *q, bool wait)
 {
 	bool blocking = !!(q->tag_set->flags & BLK_MQ_F_BLOCKING);
 	bool was_quiesced =__blk_mq_quiesce_queue_nowait(q);
@@ -231,6 +222,9 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	if (!was_quiesced && blocking)
 		percpu_ref_kill(&q->dispatch_counter);
 
+	if (!wait)
+		return;
+
 	/*
 	 * In case of F_BLOCKING, if driver unquiesces its queue being
 	 * quiesced, it can cause bigger trouble, and we simply return &
@@ -244,6 +238,20 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	else
 		synchronize_rcu();
 }
+
+/*
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent that the struct request end_io()
+ * callback function is invoked. Once this function is returned, we make
+ * sure no dispatch can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+	__blk_mq_quiesce_queue(q, true);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
 /*
@@ -265,6 +273,37 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+
+	mutex_lock(&set->tag_list_lock);
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		__blk_mq_quiesce_queue(q, false);
+
+	/* wait until all queues' quiesce is done */
+	if (set->flags & BLK_MQ_F_BLOCKING) {
+		list_for_each_entry(q, &set->tag_list, tag_set_list)
+			wait_event(q->mq_quiesce_wq,
+				   percpu_ref_is_zero(&q->dispatch_counter));
+	} else {
+		synchronize_rcu();
+	}
+	mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
+
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+
+	mutex_lock(&set->tag_list_lock);
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		blk_mq_unquiesce_queue(q);
+	mutex_unlock(&set->tag_list_lock);
+}
+EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index df642055f02c..90da3582b91d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -519,6 +519,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
 
 unsigned int blk_mq_rq_cpu(struct request *rq);
 
-- 
2.25.2
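
As a reference for reviewers, below is a minimal, hypothetical driver-side
sketch of how the two new helpers could be used. It is not part of this
patch; the my_ctrl structure and the my_ctrl_* function names are invented
purely for illustration, and the actual nvme conversion is done in a
separate patch.

#include <linux/blk-mq.h>

struct my_ctrl {
	/* one tagset shared by all of this controller's request queues */
	struct blk_mq_tag_set	tagset;
};

/*
 * Quiesce every request queue that shares ctrl->tagset in one call.
 * For BLK_MQ_F_BLOCKING tagsets this waits until each queue's
 * dispatch_counter has drained to zero; otherwise a single
 * synchronize_rcu() covers all queues at once.
 */
static void my_ctrl_stop_io(struct my_ctrl *ctrl)
{
	blk_mq_quiesce_tagset(&ctrl->tagset);
}

/* Allow dispatch again on all queues in the tagset. */
static void my_ctrl_resume_io(struct my_ctrl *ctrl)
{
	blk_mq_unquiesce_tagset(&ctrl->tagset);
}

Compared with iterating over the driver's queues and calling
blk_mq_quiesce_queue() on each one, the non-blocking case above pays for
only one synchronize_rcu() grace period regardless of how many queues
share the tagset.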