From: Yu Kuai <yukuai3@xxxxxxxxxx>

This reverts commit 39823b47bbd40502632ffba90ebb34fff7c8b5e8.

1) Setting min_shallow_depth to 1 ends up setting wake_batch to 1 as
well, which causes performance degradation in some high-concurrency
tests, for both IO bandwidth and CPU usage. async_depth can be changed
through sysfs, and its minimal value is 1; that is why min_shallow_depth
was set to 1 at initialization, to keep the code functionally correct
if async_depth is set to 1. However, sacrificing performance in the
default scenario is not acceptable.

2) dd_to_word_depth() is supposed to scale down async_depth. However,
the user can set nr_requests so low that sb->depth is less than
1 << sb->shift, in which case dd_to_word_depth() ends up scaling
async_depth up instead.

Fixes: 39823b47bbd4 ("block/mq-deadline: Fix the tag reservation code")
Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/mq-deadline.c | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 91b3789f710e..1f0d175a941e 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -487,20 +487,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * 'depth' is a number in the range 1..INT_MAX representing a number of
- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
- * Values larger than q->nr_requests have the same effect as q->nr_requests.
- */
-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
-{
-	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
-	const unsigned int nrr = hctx->queue->nr_requests;
-
-	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
-}
-
 /*
  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  * function is used by __blk_mq_get_tag().
@@ -517,7 +503,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * Throttle asynchronous requests and writes such that these requests
 	 * do not block the allocation of synchronous requests.
 	 */
-	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+	data->shallow_depth = dd->async_depth;
 }
 
 /* Called by blk_mq_update_nr_requests(). */
@@ -527,9 +513,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
 
-	dd->async_depth = q->nr_requests;
+	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }
 
 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
-- 
2.39.2
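
(Not part of the patch, just for reference: a minimal user-space sketch
of why min_shallow_depth = 1 drags wake_batch down to 1, as described in
point 1 above. This only models the shape of sbq_calc_wake_batch() in
lib/sbitmap.c; the helper name calc_wake_batch and the exact rounding
below are simplifications, not kernel code.)

#include <stdio.h>

#define SBQ_WAIT_QUEUES	8
#define SBQ_WAKE_BATCH	8

/*
 * Rough model: cap each sbitmap word at the minimum shallow depth,
 * then spread the usable depth across the wait queues, clamped to
 * the range [1, SBQ_WAKE_BATCH].
 */
static unsigned int calc_wake_batch(unsigned int depth, unsigned int shift,
				    unsigned int min_shallow_depth)
{
	unsigned int word_depth = 1U << shift;
	unsigned int shallow = min_shallow_depth < word_depth ?
			       min_shallow_depth : word_depth;
	unsigned int tail = depth & (word_depth - 1);
	unsigned int usable, batch;

	usable = (depth >> shift) * shallow + (tail < shallow ? tail : shallow);
	batch = usable / SBQ_WAIT_QUEUES;
	if (batch < 1)
		batch = 1;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	return batch;
}

int main(void)
{
	/* 256 tags, 64-bit words (shift = 6) */
	printf("min_shallow_depth=1:   wake_batch=%u\n",
	       calc_wake_batch(256, 6, 1));	/* prints 1 */
	printf("min_shallow_depth=192: wake_batch=%u\n",
	       calc_wake_batch(256, 6, 192));	/* prints 8 */
	return 0;
}

With 256 tags and min_shallow_depth = 1, only 4 bits (one per 64-bit
word) count toward the batch calculation, so wake_batch is clamped to 1
and waiters are woken one at a time.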
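
And a standalone illustration of the scale-up in point 2; the numbers
nr_requests = 16, shift = 6 and async_depth = 8 are made up for
illustration, only the arithmetic matches the reverted
dd_to_word_depth():

#include <stdio.h>

/* Same arithmetic as the reverted dd_to_word_depth() */
static unsigned int dd_to_word_depth(unsigned int qdepth, unsigned int shift,
				     unsigned int nr_requests)
{
	return ((qdepth << shift) + nr_requests - 1) / nr_requests;
}

int main(void)
{
	unsigned int shift = 6;		/* 64 bits per sbitmap word */
	unsigned int nr_requests = 16;	/* sb->depth = 16 < (1 << shift) */
	unsigned int async_depth = 8;	/* user asks for at most 8 async tags */

	/*
	 * ((8 << 6) + 15) / 16 = 32: the single word only holds 16 valid
	 * bits, all below index 32, so every tag stays allocatable to
	 * async I/O and the limit of 8 is effectively scaled up to 16.
	 */
	printf("word depth = %u\n",
	       dd_to_word_depth(async_depth, shift, nr_requests));
	return 0;
}

A word depth of 32 covers all 16 valid bits of the single sbitmap word,
so the requested limit of 8 async tags is never enforced.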