Use the block layer helper to initialize the common fields of the tag_set,
such as blk_mq_ops, number of h/w queues, queue depth, command size,
numa_node, timeout, BLK_MQ_F_XXX flags, and driver data. This
initialization is currently spread all over the block drivers. Using the
helper avoids repeating the tag_set initialization code in current block
drivers and any future ones.

Signed-off-by: Chaitanya Kulkarni <kch@xxxxxxxxxx>
---
 drivers/mmc/core/queue.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index fefaa901b50f..599a34a5680a 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -417,7 +417,6 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	spin_lock_init(&mq->lock);
 
 	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
-	mq->tag_set.ops = &mmc_mq_ops;
 	/*
 	 * The queue depth for CQE must match the hardware because the request
 	 * tag is used to index the hardware queue.
@@ -427,11 +426,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
 	else
 		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
-	mq->tag_set.numa_node = NUMA_NO_NODE;
-	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-	mq->tag_set.nr_hw_queues = 1;
-	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
-	mq->tag_set.driver_data = mq;
+	blk_mq_init_tag_set(&mq->tag_set, &mmc_mq_ops, 1, 0,
+			    sizeof(struct mmc_queue_req), NUMA_NO_NODE, 0,
+			    BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING, mq);
 
 	/*
 	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
-- 
2.29.0
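
Note: the definition of blk_mq_init_tag_set() is not part of this hunk; it is
introduced elsewhere in the series. As a rough sketch only, inferred from the
call site above, the helper could look something like the following. The
parameter order (set, ops, nr_hw_queues, queue_depth, cmd_size, numa_node,
timeout, flags, driver_data) and the "only override queue_depth/timeout when
non-zero" behaviour are assumptions, not confirmed by this patch; they are
guessed from the fact that mmc_init_queue() fills in tag_set.queue_depth
itself and then passes 0 here.

/*
 * Hypothetical sketch, not the actual helper from the series.
 */
static inline void blk_mq_init_tag_set(struct blk_mq_tag_set *set,
				       const struct blk_mq_ops *ops,
				       unsigned int nr_hw_queues,
				       unsigned int queue_depth,
				       unsigned int cmd_size,
				       int numa_node,
				       unsigned int timeout,
				       unsigned int flags,
				       void *driver_data)
{
	set->ops = ops;
	set->nr_hw_queues = nr_hw_queues;
	/* assumed: callers that manage the depth themselves pass 0 */
	if (queue_depth)
		set->queue_depth = queue_depth;
	set->cmd_size = cmd_size;
	set->numa_node = numa_node;
	/* assumed: 0 means keep the block layer default timeout */
	if (timeout)
		set->timeout = timeout;
	set->flags = flags;
	set->driver_data = driver_data;
}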