This patch adds a new member, nr_allocated_map_rqs, to struct
blk_mq_tag_set to record the number of maps and requests that have been
allocated for this tagset. Currently, when the hardware queue count is
increased, no maps and requests are allocated for the newly added
hardware queues; this will be fixed in the next patch. Since requests
need lots of memory, it is not easy to allocate that much memory
dynamically, especially when the system is under memory pressure. This
patch allows nr_hw_queues to differ from nr_allocated_map_rqs, to avoid
allocating/freeing memory whenever the hardware queue count changes.

Signed-off-by: Weiping Zhang <zhangweiping@xxxxxxxxxxxxxx>
---
 block/blk-mq.c         | 28 +++++++++++++++++++++-------
 include/linux/blk-mq.h |  1 +
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index df243c19a158..15f6a811122a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2479,8 +2479,10 @@ static bool __blk_mq_alloc_rq_map_and_request(struct blk_mq_tag_set *set, int hc
 
 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
 				set->queue_depth);
-	if (!ret)
+	if (!ret) {
+		set->nr_allocated_map_rqs++;
 		return true;
+	}
 
 	blk_mq_free_rq_map(set->tags[hctx_idx]);
 	set->tags[hctx_idx] = NULL;
@@ -2494,6 +2496,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
 		blk_mq_free_rq_map(set->tags[hctx_idx]);
 		set->tags[hctx_idx] = NULL;
+		set->nr_allocated_map_rqs--;
 	}
 }
 
@@ -2978,18 +2981,28 @@ void blk_mq_exit_queue(struct request_queue *q)
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
-static int __blk_mq_alloc_rq_map_and_requests(struct blk_mq_tag_set *set)
+/*
+ * Only append new maps and requests if new > now; all of these maps and
+ * requests will be released when the whole tag set is cleaned up. Since
+ * requests cost lots of memory, it is not easy to allocate that much
+ * memory when the system is under memory pressure.
+ */
+static int blk_mq_realloc_rq_map_and_requests(struct blk_mq_tag_set *set,
+					      int new)
 {
-	int i;
+	int i, now = set->nr_allocated_map_rqs;
+
+	if (new <= now)
+		return 0;
 
-	for (i = 0; i < set->nr_hw_queues; i++)
+	for (i = now; i < new; i++)
 		if (!__blk_mq_alloc_rq_map_and_request(set, i))
 			goto out_unwind;
 
 	return 0;
 
 out_unwind:
-	while (--i >= 0)
+	while (--i >= now)
 		blk_mq_free_map_and_requests(set, i);
 
 	return -ENOMEM;
@@ -3007,7 +3020,8 @@ static int blk_mq_alloc_rq_map_and_requests(struct blk_mq_tag_set *set)
 
 	depth = set->queue_depth;
 	do {
-		err = __blk_mq_alloc_rq_map_and_requests(set);
+		err = blk_mq_realloc_rq_map_and_requests(set,
+							 set->nr_hw_queues);
 		if (!err)
 			break;
@@ -3184,7 +3198,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i, j;
 
-	for (i = 0; i < set->nr_hw_queues; i++)
+	for (i = 0; i < set->nr_allocated_map_rqs; i++)
 		blk_mq_free_map_and_requests(set, i);
 
 	for (j = 0; j < set->nr_maps; j++) {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index f389d7c724bd..d950435cd3c6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -240,6 +240,7 @@ struct blk_mq_tag_set {
 	unsigned int		nr_maps;
 	const struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
+	unsigned int		nr_allocated_map_rqs;
 	unsigned int		queue_depth;
 	unsigned int		reserved_tags;
 	unsigned int		cmd_size;
-- 
2.18.1
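
For readers less familiar with the pattern, a minimal userspace C sketch of
the grow-only bookkeeping this patch introduces follows. It is illustrative
only: the struct and function names (tag_set, grow_table, free_table) are
hypothetical, not kernel API, and plain malloc()/free() stand in for the
map/request allocation.

/*
 * Grow-only allocation table: the table only grows toward the requested
 * count, and everything is freed in one place at teardown. This mirrors
 * how nr_allocated_map_rqs may exceed nr_hw_queues after the hardware
 * queue count is decreased.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 64

struct tag_set {
	void *tags[MAX_QUEUES];
	int nr_hw_queues;	/* queues currently in use */
	int nr_allocated;	/* entries actually allocated */
};

/* Append allocations only when new_count exceeds what already exists. */
static int grow_table(struct tag_set *set, int new_count)
{
	int i, now = set->nr_allocated;

	if (new_count <= now)
		return 0;	/* shrink is a no-op: keep old allocations */

	for (i = now; i < new_count; i++) {
		set->tags[i] = malloc(4096);
		if (!set->tags[i])
			goto out_unwind;
		set->nr_allocated++;
	}
	return 0;

out_unwind:
	/* Free only what this call allocated; entries below 'now' survive. */
	while (--i >= now) {
		free(set->tags[i]);
		set->nr_allocated--;
	}
	return -1;
}

static void free_table(struct tag_set *set)
{
	int i;

	/* Teardown walks nr_allocated, not nr_hw_queues. */
	for (i = 0; i < set->nr_allocated; i++)
		free(set->tags[i]);
	set->nr_allocated = 0;
}

int main(void)
{
	struct tag_set set = { .nr_hw_queues = 4 };

	grow_table(&set, 4);	/* allocate for 4 queues */
	set.nr_hw_queues = 2;	/* decrease count: allocations are kept */
	grow_table(&set, 2);	/* no-op, avoids free/alloc churn */
	printf("in use: %d, allocated: %d\n",
	       set.nr_hw_queues, set.nr_allocated);
	free_table(&set);
	return 0;
}

The design choice the sketch highlights is the same one the patch makes:
shrinking the in-use count never frees memory, so raising the count again
(up to what was previously allocated) costs nothing, and cleanup has a
single authoritative counter to walk.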