We need flush tags to be unique across hardware contexts and to not overlap with normal tags. BLK_MQ_MAX_DEPTH as a base number seems a better choice than a queue's depth.