[PATCH RFC 4/7] blk-mq: precalculate available tags for hctx_may_queue()

From: Yu Kuai <yukuai3@xxxxxxxxxx>

Currently, hctx_may_queue() only needs to know how many queues are
sharing the tags, and then calculates how many tags are available to
each queue by fair sharing. In order to refactor how tags are shared,
the calculation will become more complicated; however, hctx_may_queue()
is in the fast path. Hence precalculate the available tags in the slow
path, in preparation for refactoring tag sharing.
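
For illustration, the fair-sharing calculation that this patch moves
out of the fast path behaves like the following minimal standalone
sketch (fair_share_tags() is a hypothetical name used only for this
illustration, not a helper added by the patch):

#include <stdio.h>

/*
 * Hypothetical userspace version of the calculation done by
 * blk_mq_update_available_driver_tags() below.
 */
static unsigned int fair_share_tags(unsigned int nr_tags,
				    unsigned int share_queues)
{
	unsigned int depth;

	/* An unshared queue may use the whole tag space. */
	if (share_queues <= 1)
		return nr_tags;

	/* Divide the depth evenly among the sharers, rounding up. */
	depth = (nr_tags + share_queues - 1) / share_queues;

	/* Always allow at least some tags so a queue is never starved. */
	return depth < 4 ? 4 : depth;
}

int main(void)
{
	/* 256 tags shared by 3 queues -> 86 tags each. */
	printf("%u\n", fair_share_tags(256, 3));
	/* The floor of 4 kicks in for small tag spaces: 4, not 1. */
	printf("%u\n", fair_share_tags(4, 8));
	return 0;
}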

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/blk-mq-tag.c     | 19 +++++++++++++++++++
 block/blk-mq.c         |  3 +++
 block/blk-mq.h         | 14 +++++---------
 include/linux/blkdev.h |  3 ++-
 4 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 8c527e68d4e4..e0137206c02b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -14,6 +14,22 @@
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
 
+static void blk_mq_update_available_driver_tags(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_tags *tags = hctx->tags;
+	unsigned int nr_tags;
+	struct tag_sharing *tag_sharing;
+
+	if (tags->ctl.share_queues <= 1)
+		nr_tags = tags->nr_tags;
+	else
+		nr_tags = max((tags->nr_tags + tags->ctl.share_queues - 1) /
+			       tags->ctl.share_queues, 4U);
+
+	list_for_each_entry(tag_sharing, &tags->ctl.head, node)
+		tag_sharing->available_tags = nr_tags;
+}
+
 /*
  * Recalculate wakeup batch when tag is shared by hctx.
  */
@@ -51,6 +67,7 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
 
 	spin_lock_irq(&tags->lock);
 	WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
+	blk_mq_update_available_driver_tags(hctx);
 	blk_mq_update_wake_batch(tags, tags->ctl.share_queues);
 	spin_unlock_irq(&tags->lock);
 }
@@ -136,9 +153,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
 	spin_lock_irq(&tags->lock);
 	list_del_init(&tag_sharing->node);
+	tag_sharing->available_tags = tags->nr_tags;
 	__blk_mq_driver_tag_idle(hctx);
 	WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
 	WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
+	blk_mq_update_available_driver_tags(hctx);
 	blk_mq_update_wake_batch(tags, tags->ctl.share_queues);
 	spin_unlock_irq(&tags->lock);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 171ee4ac97ef..771802ff1d45 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3621,6 +3621,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
 
 	hctx->tags = set->tags[hctx_idx];
+	hctx->tag_sharing.available_tags = hctx->tags->nr_tags;
 
 	if (set->ops->init_hctx &&
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
@@ -3881,6 +3882,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		}
 
 		hctx->tags = set->tags[i];
+		hctx->tag_sharing.available_tags = hctx->tags->nr_tags;
 		WARN_ON(!hctx->tags);
 
 		/*
@@ -4234,6 +4236,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	spin_lock_init(&q->requeue_lock);
 
 	q->nr_requests = set->queue_depth;
+	q->tag_sharing.available_tags = set->queue_depth;
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_add_queue_tag_set(set, q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 01441a5e9910..fcfb040efbbd 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -398,7 +398,7 @@ static inline void blk_mq_free_requests(struct list_head *list)
 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 				  struct sbitmap_queue *bt)
 {
-	unsigned int depth, users;
+	unsigned int depth;
 
 	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
 		return true;
@@ -414,19 +414,15 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 
 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return true;
+
+		depth = READ_ONCE(q->tag_sharing.available_tags);
 	} else {
 		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return true;
-	}
 
-	users = READ_ONCE(hctx->tags->ctl.share_queues);
-	if (!users)
-		return true;
+		depth = READ_ONCE(hctx->tag_sharing.available_tags);
+	}
 
-	/*
-	 * Allow at least some tags
-	 */
-	depth = max((bt->sb.depth + users - 1) / users, 4U);
 	return __blk_mq_active_requests(hctx) < depth;
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 62f8fcc20c30..e5111bedfd8d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -376,7 +376,8 @@ struct blk_independent_access_ranges {
 };
 
 struct tag_sharing {
-	struct list_head node;
+	struct list_head	node;
+	unsigned int		available_tags;
 };
 
 struct request_queue {
-- 
2.39.2