On 30.11.23 20:31, Bart Van Assche wrote:
> +void blk_mq_update_fair_sharing(struct blk_mq_tag_set *set, bool enable)
> +{
> +	const unsigned int DFTS_BIT = ilog2(BLK_MQ_F_DISABLE_FAIR_TAG_SHARING);
> +	struct blk_mq_hw_ctx *hctx;
> +	struct request_queue *q;
> +	unsigned long i;
> +
> +	/*
> +	 * Serialize against blk_mq_update_nr_hw_queues() and
> +	 * blk_mq_realloc_hw_ctxs().
> +	 */
> +	mutex_lock(&set->tag_list_lock);
> +	list_for_each_entry(q, &set->tag_list, tag_set_list)
> +		blk_mq_freeze_queue(q);
> +	assign_bit(DFTS_BIT, &set->flags, !enable);
> +	list_for_each_entry(q, &set->tag_list, tag_set_list)
> +		queue_for_each_hw_ctx(q, hctx, i)
> +			assign_bit(DFTS_BIT, &hctx->flags, !enable);
> +	list_for_each_entry(q, &set->tag_list, tag_set_list)
> +		blk_mq_unfreeze_queue(q);
> +	mutex_unlock(&set->tag_list_lock);

Hi Bart,

The above code adds (at least) a third user of the following pattern to
the kernel:

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/* do stuff */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);

Would it perhaps be beneficial to introduce helpers for this, for example:

static inline void blk_mq_freeze_tag_set(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
}

static inline void blk_mq_unfreeze_tag_set(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}
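
For illustration, with such helpers the function from your patch could be
written roughly as follows (untested sketch, only meant to show the intended
use of the proposed blk_mq_freeze_tag_set()/blk_mq_unfreeze_tag_set()):

void blk_mq_update_fair_sharing(struct blk_mq_tag_set *set, bool enable)
{
	const unsigned int DFTS_BIT = ilog2(BLK_MQ_F_DISABLE_FAIR_TAG_SHARING);
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	unsigned long i;

	/*
	 * Serialize against blk_mq_update_nr_hw_queues() and
	 * blk_mq_realloc_hw_ctxs().
	 */
	mutex_lock(&set->tag_list_lock);
	/* Freeze all queues sharing this tag set before flipping the flag. */
	blk_mq_freeze_tag_set(set);
	assign_bit(DFTS_BIT, &set->flags, !enable);
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		queue_for_each_hw_ctx(q, hctx, i)
			assign_bit(DFTS_BIT, &hctx->flags, !enable);
	blk_mq_unfreeze_tag_set(set);
	mutex_unlock(&set->tag_list_lock);
}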