Hi,
在 2024/01/12 3:22, Bart Van Assche 写道:
On 12/25/23 04:51, Yu Kuai wrote:
Are you still interested in this patchset? I really want this switch in
our product as well.
If so, how do you think about following changes, a new field in
blk_mq_tag_set will make synchronization much easier.
Do you perhaps see the new field as an alternative for the
BLK_MQ_F_DISABLE_FAIR_TAG_SHARING flag? I'm not sure that would be an
improvement. hctx_may_queue() is called from the hot path. Using the
'flags' field will make it easier for the compiler to optimize that
function compared to using a new structure member.
Yes, I realized that. Handling the new flag in blk_mq_allow_hctx() is
good; how about the following change?
Thanks,
Kuai
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6ab7f360ff2a..dd7c9e3eca1b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3706,7 +3706,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
- hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
+ hctx->flags = set->flags & ~(BLK_MQ_F_TAG_QUEUE_SHARED |
+ BLK_MQ_F_DISABLE_FAIR_TAG_SHARING);
INIT_LIST_HEAD(&hctx->hctx_list);
@@ -3935,6 +3936,37 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
}
+static void queue_update_fair_tag_sharing(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+ bool disabled = q->tag_set->flags & BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+
+ lockdep_assert_held(&q->tag_set->tag_list_lock);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (disabled)
+ hctx->flags |= BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+ else
+ hctx->flags &= ~BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+ }
+
+}
+
+void blk_mq_update_fair_tag_sharing(struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+
+ lockdep_assert_held(&set->tag_list_lock);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_freeze_queue(q);
+ queue_update_fair_tag_sharing(q);
+ blk_mq_unfreeze_queue(q);
+ }
+}
+EXPORT_SYMBOL_GPL(blk_mq_update_fair_tag_sharing);
+
/*
* Caller needs to ensure that we're either frozen/quiesced, or that
* the queue isn't live yet.
@@ -3989,6 +4021,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
{
mutex_lock(&set->tag_list_lock);
+ queue_update_fair_tag_sharing(q);
/*
* Check to see if we're transitioning to shared (from 1 to 2 queues).
*/
@@ -4767,6 +4800,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_map_swqueue(q);
}
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ queue_update_fair_tag_sharing(q);
+
reregister:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_sysfs_register_hctxs(q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 1743857e0b01..8b9aac701035 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -393,7 +393,8 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
{
unsigned int depth, users;
- if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+ if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
+ (hctx->flags & BLK_MQ_F_DISABLE_FAIR_TAG_SHARING))
return true;
/*
Thanks,
Bart.
.