From: Yu Kuai <yukuai3@xxxxxxxxxx>

min_shallow_depth must be less than or equal to any shallow_depth value,
and mq-deadline currently sets it to 1. This forces the default
wake_batch down to 1, causing performance degradation for fast disks
under high concurrency.

Fix the problem by using the queue's async_depth, so that
min_shallow_depth is updated whenever the user sets a new value, and it
is no longer necessary to pin it at 1.

Fixes: 39823b47bbd4 ("block/mq-deadline: Fix the tag reservation code")
Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/mq-deadline.c | 43 ++++++++++---------------------------------
 1 file changed, 10 insertions(+), 33 deletions(-)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 853985bd13d4..8d19685cce3e 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -98,7 +98,6 @@ struct deadline_data {
 	int fifo_batch;
 	int writes_starved;
 	int front_merges;
-	u32 async_depth;
 	int prio_aging_expire;
 
 	spinlock_t lock;
@@ -493,8 +492,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
  */
 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	struct deadline_data *dd = data->q->elevator->elevator_data;
-
 	/* Do not throttle synchronous reads. */
 	if (op_is_sync(opf) && !op_is_write(opf))
 		return;
@@ -503,25 +500,19 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * Throttle asynchronous requests and writes such that these requests
 	 * do not block the allocation of synchronous requests.
 	 */
-	data->shallow_depth = dd->async_depth;
+	data->shallow_depth = data->q->async_depth;
 }
 
-/* Called by blk_mq_update_nr_requests(). */
-static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+static int dd_async_depth_updated(struct request_queue *q,
+				  unsigned int async_depth)
 {
-	struct request_queue *q = hctx->queue;
-	struct deadline_data *dd = q->elevator->elevator_data;
-	struct blk_mq_tags *tags = hctx->sched_tags;
-
-	dd->async_depth = q->nr_requests;
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
 
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
-}
-
-/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
-static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
-	dd_depth_updated(hctx);
+	q->async_depth = async_depth;
+	queue_for_each_hw_ctx(q, hctx, i)
+		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
+				async_depth ? async_depth : UINT_MAX);
 
 	return 0;
 }
@@ -781,7 +772,6 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -811,7 +801,6 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MA
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
@@ -825,7 +814,6 @@ static struct elv_fs_entry deadline_attrs[] = {
 	DD_ATTR(write_expire),
 	DD_ATTR(writes_starved),
 	DD_ATTR(front_merges),
-	DD_ATTR(async_depth),
 	DD_ATTR(fifo_batch),
 	DD_ATTR(prio_aging_expire),
 	__ATTR_NULL
@@ -912,15 +900,6 @@ static int deadline_starved_show(void *data, struct seq_file *m)
 	return 0;
 }
 
-static int dd_async_depth_show(void *data, struct seq_file *m)
-{
-	struct request_queue *q = data;
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	seq_printf(m, "%u\n", dd->async_depth);
-	return 0;
-}
-
 static int dd_queued_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
@@ -1030,7 +1009,6 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	DEADLINE_NEXT_RQ_ATTR(write2),
 	{"batching", 0400, deadline_batching_show},
 	{"starved", 0400, deadline_starved_show},
-	{"async_depth", 0400, dd_async_depth_show},
 	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
 	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
 	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
@@ -1043,7 +1021,6 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 
 static struct elevator_type mq_deadline = {
 	.ops = {
-		.depth_updated		= dd_depth_updated,
 		.limit_depth		= dd_limit_depth,
 		.insert_requests	= dd_insert_requests,
 		.dispatch_request	= dd_dispatch_request,
@@ -1058,7 +1035,7 @@ static struct elevator_type mq_deadline = {
 		.has_work		= dd_has_work,
 		.init_sched		= dd_init_sched,
 		.exit_sched		= dd_exit_sched,
-		.init_hctx		= dd_init_hctx,
+		.async_depth_updated	= dd_async_depth_updated,
 	},
 
 #ifdef CONFIG_BLK_DEBUG_FS
--
2.39.2
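
For reviewers who want to see why pinning min_shallow_depth at 1 collapses the
default wake_batch, below is a rough, standalone user-space sketch of the
clamping behaviour described in the commit message. It is a simplified model,
not the lib/sbitmap.c code: the constants SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH
and the per-word scaling formula are approximations used only for illustration.

#include <stdio.h>

/* Illustrative constants; the real values live in include/linux/sbitmap.h. */
#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH  8

/*
 * Simplified model: the usable depth is scaled down by min_shallow_depth
 * (per bitmap word of 1 << shift bits), divided across the wait queues,
 * and clamped to [1, SBQ_WAKE_BATCH].
 */
static unsigned int calc_wake_batch(unsigned int depth, unsigned int shift,
				    unsigned int min_shallow_depth)
{
	unsigned int word_bits = 1U << shift;
	unsigned int shallow = min_shallow_depth < word_bits ?
			       min_shallow_depth : word_bits;
	unsigned int full_words = depth >> shift;
	unsigned int tail = depth & (word_bits - 1);
	unsigned int usable = full_words * shallow +
			      (tail < shallow ? tail : shallow);
	unsigned int batch = usable / SBQ_WAIT_QUEUES;

	if (batch < 1)
		batch = 1;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	return batch;
}

int main(void)
{
	/* 256 tags, 64-bit bitmap words (shift = 6). */
	printf("min_shallow_depth=1   -> wake_batch=%u\n",
	       calc_wake_batch(256, 6, 1));	/* collapses to 1 */
	printf("min_shallow_depth=192 -> wake_batch=%u\n",
	       calc_wake_batch(256, 6, 192));	/* keeps a sane batch size */
	return 0;
}

In this model, forcing min_shallow_depth to 1 shrinks the usable depth to
roughly the number of bitmap words, so the batch clamps to 1 and waiters are
woken one tag at a time; tying min_shallow_depth to async_depth, as the patch
does, avoids that degenerate case.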