Prepare for converting BLK_MQ_F_NO_SCHED into a per-queue flag, since the
following patches need this to support a per-host admin queue.

To that end, introduce __blk_mq_init_queue() and __blk_mq_init_allocated_queue(),
which take the default queue flags as a parameter, and turn the existing
interfaces into wrappers that pass QUEUE_FLAG_MQ_DEFAULT.

Cc: Alan Stern <stern@xxxxxxxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Bart Van Assche <bart.vanassche@xxxxxxx>
Cc: Jianchao Wang <jianchao.w.wang@xxxxxxxxxx>
Cc: Hannes Reinecke <hare@xxxxxxx>
Cc: Johannes Thumshirn <jthumshirn@xxxxxxx>
Cc: Adrian Hunter <adrian.hunter@xxxxxxxxx>
Cc: "James E.J. Bottomley" <jejb@xxxxxxxxxxxxxxxxxx>
Cc: "Martin K. Petersen" <martin.petersen@xxxxxxxxxx>
Cc: linux-scsi@xxxxxxxxxxxxxxx
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c         | 16 +++++++++-------
 include/linux/blk-mq.h | 19 ++++++++++++++++---
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 72a0033ccee9..d1194d1234f7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2483,7 +2483,8 @@ void blk_mq_release(struct request_queue *q)
 	free_percpu(q->queue_ctx);
 }
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+struct request_queue *__blk_mq_init_queue(struct blk_mq_tag_set *set,
+					  unsigned long def_flags)
 {
 	struct request_queue *uninit_q, *q;
 
@@ -2491,13 +2492,13 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (!uninit_q)
 		return ERR_PTR(-ENOMEM);
 
-	q = blk_mq_init_allocated_queue(set, uninit_q);
+	q = __blk_mq_init_allocated_queue(set, uninit_q, def_flags);
 	if (IS_ERR(q))
 		blk_cleanup_queue(uninit_q);
 
 	return q;
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(__blk_mq_init_queue);
 
 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
 {
@@ -2571,8 +2572,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_register(q);
 }
 
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-						  struct request_queue *q)
+struct request_queue *__blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						    struct request_queue *q,
+						    unsigned long def_flags)
 {
 	/* mark the queue as mq asap */
 	q->mq_ops = set->ops;
@@ -2606,7 +2608,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	q->nr_queues = nr_cpu_ids;
 
-	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+	q->queue_flags |= def_flags;
 
 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
@@ -2656,7 +2658,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	q->mq_ops = NULL;
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+EXPORT_SYMBOL(__blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..7f6ecd7b35ce 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -200,9 +200,22 @@ enum {
 	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-						  struct request_queue *q);
+struct request_queue *__blk_mq_init_queue(struct blk_mq_tag_set *, unsigned long);
+struct request_queue *__blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						    struct request_queue *q,
+						    unsigned long def_flags);
+
+static inline struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+	return __blk_mq_init_queue(set, QUEUE_FLAG_MQ_DEFAULT);
+}
+
+static inline struct request_queue *
+blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q)
+{
+	return __blk_mq_init_allocated_queue(set, q, QUEUE_FLAG_MQ_DEFAULT);
+}
+
 int blk_mq_register_dev(struct device *, struct request_queue *);
 void blk_mq_unregister_dev(struct device *, struct request_queue *);
 
-- 
2.9.5
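Note, not part of the patch: a minimal sketch of how a follow-up could use the
new helper. It assumes a later patch adds a per-queue counterpart of
BLK_MQ_F_NO_SCHED, called QUEUE_FLAG_NO_SCHED below purely for illustration;
after this patch only __blk_mq_init_queue() and QUEUE_FLAG_MQ_DEFAULT exist,
and the example_init_admin_queue() caller is likewise hypothetical.

	#include <linux/blk-mq.h>

	/*
	 * Hypothetical caller: allocate a per-host admin queue that bypasses
	 * the I/O scheduler, while normal I/O queues keep the default flags.
	 * QUEUE_FLAG_NO_SCHED is assumed to be added by a later patch; like
	 * QUEUE_FLAG_MQ_DEFAULT, def_flags is a mask of queue flag bits.
	 */
	static struct request_queue *
	example_init_admin_queue(struct blk_mq_tag_set *set)
	{
		return __blk_mq_init_queue(set, QUEUE_FLAG_MQ_DEFAULT |
					   (1UL << QUEUE_FLAG_NO_SCHED));
	}

Existing callers are unaffected: blk_mq_init_queue() and
blk_mq_init_allocated_queue() still pass QUEUE_FLAG_MQ_DEFAULT, so the regular
I/O path keeps today's behaviour.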