blk_mq_alloc_request_hctx() asks blk-mq to allocate request from specified hctx, which is usually bound with fixed cpu mapping, and request is supposed to be allocated on CPU in hctx->cpumask. So use smp_call_function_any() to allocate request on the cpu in hctx->cpumask for blk_mq_alloc_request_hctx(). Declare blk_mq_get_request() beforehand because the following patches reuse __blk_mq_alloc_request for blk_mq_get_request(). Prepare for improving cpu hotplug support. Cc: Bart Van Assche <bvanassche@xxxxxxx> Cc: Hannes Reinecke <hare@xxxxxxxx> Cc: Christoph Hellwig <hch@xxxxxx> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Cc: John Garry <john.garry@xxxxxxxxxx> Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx> --- block/blk-mq.c | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 9ee695bdf873..e2e1b6808b32 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -40,6 +40,10 @@ #include "blk-mq-sched.h" #include "blk-rq-qos.h" +static struct request *blk_mq_get_request(struct request_queue *q, + struct bio *bio, + struct blk_mq_alloc_data *data); + static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); @@ -330,6 +334,19 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, return rq; } +struct blk_mq_smp_call_info { + struct request_queue *q; + struct blk_mq_alloc_data *data; + struct request *rq; +}; + +static void __blk_mq_alloc_request(void *alloc_info) +{ + struct blk_mq_smp_call_info *info = alloc_info; + + info->rq = blk_mq_get_request(info->q, NULL, info->data); +} + static struct request *blk_mq_get_request(struct request_queue *q, struct bio *bio, struct blk_mq_alloc_data *data) @@ -424,8 +441,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) { struct blk_mq_alloc_data alloc_data = { .flags = flags, 
.cmd_flags = op }; - struct request *rq; - unsigned int cpu; + struct blk_mq_smp_call_info info = {.q = q, .data = &alloc_data}; int ret; /* @@ -448,21 +464,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, * Check if the hardware context is actually mapped to anything. * If not tell the caller that it should skip this queue. */ - alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; - if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { + if (!blk_mq_hw_queue_mapped(q->queue_hw_ctx[hctx_idx])) { blk_queue_exit(q); return ERR_PTR(-EXDEV); } - cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); - alloc_data.ctx = __blk_mq_get_ctx(q, cpu); - rq = blk_mq_get_request(q, NULL, &alloc_data); + ret = smp_call_function_any(alloc_data.hctx->cpumask, + __blk_mq_alloc_request, &info, 1); blk_queue_exit(q); - if (!rq) + if (ret) + return ERR_PTR(ret); + + if (!info.rq) return ERR_PTR(-EWOULDBLOCK); - return rq; + return info.rq; } EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); -- 2.25.2