On Thu, Oct 20, 2022 at 11:53:47AM +0800, Chao Leng wrote:
> Drivers that have shared tagsets may need to quiesce potentially a lot
> of request queues that all share a single tagset (e.g. nvme). Add an
> interface to quiesce all the queues on a given tagset. This interface
> is useful because it can speed up the quiesce by doing it in parallel.
>
> For tagsets that have BLK_MQ_F_BLOCKING set, we first call
> start_poll_synchronize_srcu for all queues of the tagset, and then
> call poll_state_synchronize_srcu such that all of them wait for the
> same SRCU grace period. For tagsets that don't have BLK_MQ_F_BLOCKING
> set, we simply call a single synchronize_rcu, as this is sufficient.
>
> Some queues (e.g. the nvme connect_q) do not need to be quiesced when
> the tagset is quiesced, so introduce QUEUE_FLAG_SKIP_TAGSET_QUIESCE to
> let the tagset quiesce interface skip such queues.
>
> Signed-off-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
> Signed-off-by: Chao Leng <lengchao@xxxxxxxxxx>

From an RCU viewpoint:

Reviewed-by: Paul E. McKenney <paulmck@xxxxxxxxxx>

As noted in the earlier email thread, using a single srcu_struct for the
whole tag list would require even less memory and would further reduce
the overhead and latency of blk_mq_quiesce_blocking_tagset().

							Thanx, Paul

> ---
>  block/blk-mq.c         | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/blk-mq.h |  2 ++
>  include/linux/blkdev.h |  3 ++
>  3 files changed, 81 insertions(+)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 8070b6c10e8d..f064ecda425b 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -311,6 +311,82 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
>  }
>  EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
>
> +static void blk_mq_quiesce_blocking_tagset(struct blk_mq_tag_set *set)
> +{
> +	int i, count = 0;
> +	struct request_queue *q;
> +	unsigned long *rcu;
> +
> +	list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +		if (blk_queue_skip_tagset_quiesce(q))
> +			continue;
> +
> +		blk_mq_quiesce_queue_nowait(q);
> +		count++;
> +	}
> +
> +	rcu = kvmalloc(count * sizeof(*rcu), GFP_KERNEL);
> +	if (rcu) {
> +		i = 0;
> +		list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +			if (blk_queue_skip_tagset_quiesce(q))
> +				continue;
> +
> +			rcu[i++] = start_poll_synchronize_srcu(q->srcu);
> +		}
> +
> +		i = 0;
> +		list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +			if (blk_queue_skip_tagset_quiesce(q))
> +				continue;
> +
> +			if (!poll_state_synchronize_srcu(q->srcu, rcu[i++]))
> +				synchronize_srcu(q->srcu);
> +		}
> +
> +		kvfree(rcu);
> +	} else {
> +		list_for_each_entry(q, &set->tag_list, tag_set_list)
> +			synchronize_srcu(q->srcu);
> +	}
> +}
> +
> +static void blk_mq_quiesce_nonblocking_tagset(struct blk_mq_tag_set *set)
> +{
> +	struct request_queue *q;
> +
> +	list_for_each_entry(q, &set->tag_list, tag_set_list) {
> +		if (blk_queue_skip_tagset_quiesce(q))
> +			continue;
> +
> +		blk_mq_quiesce_queue_nowait(q);
> +	}
> +	synchronize_rcu();
> +}
> +
> +void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
> +{
> +	mutex_lock(&set->tag_list_lock);
> +	if (set->flags & BLK_MQ_F_BLOCKING)
> +		blk_mq_quiesce_blocking_tagset(set);
> +	else
> +		blk_mq_quiesce_nonblocking_tagset(set);
> +
> +	mutex_unlock(&set->tag_list_lock);
> +}
> +EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);
> +
> +void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
> +{
> +	struct request_queue *q;
> +
> +	mutex_lock(&set->tag_list_lock);
> +	list_for_each_entry(q, &set->tag_list, tag_set_list)
> +		blk_mq_unquiesce_queue(q);
> +
> +	mutex_unlock(&set->tag_list_lock);
> +}
> +EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
> +
>  void blk_mq_wake_waiters(struct request_queue *q)
>  {
>  	struct blk_mq_hw_ctx *hctx;
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index ba18e9bdb799..1df47606d0a7 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -877,6 +877,8 @@ void blk_mq_start_hw_queues(struct request_queue *q);
>  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
>  void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
>  void blk_mq_quiesce_queue(struct request_queue *q);
> +void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
> +void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
>  void blk_mq_wait_quiesce_done(struct request_queue *q);
>  void blk_mq_unquiesce_queue(struct request_queue *q);
>  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index 50e358a19d98..efa3fa771dce 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -579,6 +579,7 @@ struct request_queue {
>  #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
>  #define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
>  #define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
> +#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31	/* quiesce_tagset skips the queue */
>
>  #define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
>  				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
> @@ -619,6 +620,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
>  #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
>  #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
>  #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
> +#define blk_queue_skip_tagset_quiesce(q) \
> +	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
>
>  extern void blk_set_pm_only(struct request_queue *q);
>  extern void blk_clear_pm_only(struct request_queue *q);
> --
> 2.16.4
>
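To make the single-srcu_struct suggestion concrete, here is a minimal
sketch of what blk_mq_quiesce_blocking_tagset() could collapse to if
struct blk_mq_tag_set carried its own srcu_struct that every queue in
the set used for blocking dispatch. The tags_srcu field and its
init/cleanup are assumptions for illustration, not part of the posted
patch:

/*
 * Sketch only: set->tags_srcu is a hypothetical per-tagset srcu_struct,
 * not a field in the posted patch.  With one srcu_struct shared by all
 * queues in the set, a single SRCU grace period covers every queue, so
 * the per-queue cookie array and its kvmalloc() fallback go away.
 */
static void blk_mq_quiesce_blocking_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (blk_queue_skip_tagset_quiesce(q))
			continue;

		blk_mq_quiesce_queue_nowait(q);
	}

	/* One grace period waits out in-flight dispatch on all queues. */
	synchronize_srcu(&set->tags_srcu);
}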
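For reference, a driver sitting on a shared tagset would use the new
pair roughly as follows. This is a sketch, not the nvme conversion
itself; ctrl stands in for a controller structure that, like nvme_ctrl,
carries a pointer to the shared tagset, and queues flagged with
QUEUE_FLAG_SKIP_TAGSET_QUIESCE (e.g. the connect_q) are left alone:

	/* Stop dispatch on every queue sharing the tagset in one call. */
	blk_mq_quiesce_tagset(ctrl->tagset);

	/* ... reset/teardown work runs with dispatch quiesced ... */

	/* Resume dispatch on all queues of the tagset. */
	blk_mq_unquiesce_tagset(ctrl->tagset);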