Drivers may have to quiesce a large number of request queues at once
(e.g. on a controller or adapter reset). These drivers would benefit
from an async quiesce interface so that they can trigger quiesce
asynchronously and wait for all of them in parallel.

This leaves the synchronization responsibility with the driver, but adds
a convenient interface to quiesce asynchronously and wait in a single
pass.

Signed-off-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
---
 block/blk-mq.c         | 32 ++++++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  4 ++++
 2 files changed, 36 insertions(+)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index abcf590f6238..60d137265bd9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -209,6 +209,38 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
+void blk_mq_quiesce_queue_async(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	blk_mq_quiesce_queue_nowait(q);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		init_completion(&hctx->rcu_sync.completion);
+		init_rcu_head(&hctx->rcu_sync.head);
+		if (hctx->flags & BLK_MQ_F_BLOCKING)
+			call_srcu(hctx->srcu, &hctx->rcu_sync.head,
+				wakeme_after_rcu);
+		else
+			call_rcu(&hctx->rcu_sync.head,
+				wakeme_after_rcu);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_async);
+
+void blk_mq_quiesce_queue_async_wait(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		wait_for_completion(&hctx->rcu_sync.completion);
+		destroy_rcu_head(&hctx->rcu_sync.head);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_async_wait);
+
 /**
  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
  * @q: request queue.
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 23230c1d031e..5536e434311a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -5,6 +5,7 @@
 #include <linux/blkdev.h>
 #include <linux/sbitmap.h>
 #include <linux/srcu.h>
+#include <linux/rcupdate_wait.h>
 
 struct blk_mq_tags;
 struct blk_flush_queue;
@@ -170,6 +171,7 @@ struct blk_mq_hw_ctx {
 	 */
 	struct list_head	hctx_list;
 
+	struct rcu_synchronize	rcu_sync;
 	/**
 	 * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
 	 * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
@@ -532,6 +534,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		int nr_hw_queues);
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+void blk_mq_quiesce_queue_async(struct request_queue *q);
+void blk_mq_quiesce_queue_async_wait(struct request_queue *q);
 
 unsigned int blk_mq_rq_cpu(struct request *rq);
 
-- 
2.25.1
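
A minimal driver-side usage sketch (not part of this patch): it assumes a
hypothetical controller structure that keeps its namespaces on a list, with
each namespace owning a request_queue; the my_ctrl/my_ns names and fields
are illustrative only, not an existing API.

	static void my_ctrl_quiesce_all_queues(struct my_ctrl *ctrl)
	{
		struct my_ns *ns;

		/* Kick off quiesce on every queue; each call starts the
		 * per-hctx (S)RCU grace periods but does not wait for them.
		 */
		list_for_each_entry(ns, &ctrl->namespaces, list)
			blk_mq_quiesce_queue_async(ns->queue);

		/* Now wait once for all queues; the grace periods started
		 * above elapse in parallel rather than back-to-back.
		 */
		list_for_each_entry(ns, &ctrl->namespaces, list)
			blk_mq_quiesce_queue_async_wait(ns->queue);
	}

Compared to calling blk_mq_quiesce_queue() once per queue, this two-pass
form pays roughly one grace-period latency for the whole controller
instead of one per queue.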