From: Liu Song <liusong@xxxxxxxxxxxxxxxxx>

blk_mq_get_tags() contains the conditions that decide whether a batched
tag allocation can be performed, and this function is only called by
__blk_mq_alloc_requests_batch(). This patch introduces a helper,
can_do_batch_alloc(), that checks these conditions up front, so the
batch path is skipped early and unnecessary function calls are avoided.

Signed-off-by: Liu Song <liusong@xxxxxxxxxxxxxxxxx>
---
 block/blk-mq-tag.c |  3 ---
 block/blk-mq.c     | 11 ++++++++++-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 8e3b36d..cda3987 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -115,9 +115,6 @@ unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
 	struct sbitmap_queue *bt = &tags->bitmap_tags;
 	unsigned long ret;
 
-	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
-	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-		return 0;
 	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
 	*offset += tags->nr_reserved_tags;
 	return ret;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3c1e6b6..330c618 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -437,6 +437,15 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq_list_pop(data->cached_rq);
 }
 
+static inline bool can_do_batch_alloc(struct blk_mq_alloc_data *data)
+{
+	if (data->nr_tags > 1 && !(data->shallow_depth ||
+	    data->flags & BLK_MQ_REQ_RESERVED ||
+	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+		return true;
+	return false;
+}
+
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
@@ -480,7 +489,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	/*
 	 * Try batched alloc if we want more than 1 tag.
 	 */
-	if (data->nr_tags > 1) {
+	if (can_do_batch_alloc(data)) {
 		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
 		if (rq)
 			return rq;
-- 
1.8.3.1
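
As an aside, and not part of the patch itself: the if/return-true/
return-false body of can_do_batch_alloc() could equivalently be written
as a single boolean return. A minimal sketch of that alternative form,
restating exactly the same conditions from the hunk above:

	static inline bool can_do_batch_alloc(struct blk_mq_alloc_data *data)
	{
		/* Batching needs more than one tag wanted... */
		return data->nr_tags > 1 &&
		       /* ...no shallow-depth limit in effect... */
		       !data->shallow_depth &&
		       /* ...not a reserved-tag allocation... */
		       !(data->flags & BLK_MQ_REQ_RESERVED) &&
		       /* ...and no shared tag set on this hctx. */
		       !(data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
	}

Both forms express the same predicate; the direct return merely drops
one level of branching in the source.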