When selecting the next CPU for running a hw queue, all of this hctx's
CPUs may have become offline, so deactivate the hctx at that point. This
fixes corner cases that deactivating the hctx from the CPU hotplug
handler cannot cover, such as a request being requeued during CPU
hotplug, where the handler cannot find the requeued requests and
re-submit them.

Cc: John Garry <john.garry@xxxxxxxxxx>
Cc: Bart Van Assche <bvanassche@xxxxxxx>
Cc: Hannes Reinecke <hare@xxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Keith Busch <keith.busch@xxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 93c835312d42..fada556880ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,6 +43,8 @@ static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
+static void blk_mq_hctx_deactivate(struct blk_mq_hw_ctx *hctx);
+
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
 	int ddir, sectors, bucket;
@@ -1431,7 +1433,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 		 */
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = 1;
-		return WORK_CPU_UNBOUND;
+		return -1;
 	}
 
 	hctx->next_cpu = next_cpu;
@@ -1450,6 +1452,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 					unsigned long msecs)
 {
+	int cpu;
+
 	if (unlikely(blk_mq_hctx_stopped(hctx)))
 		return;
 
@@ -1464,8 +1468,13 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 		put_cpu();
 	}
 
-	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
-				    msecs_to_jiffies(msecs));
+	cpu = blk_mq_hctx_next_cpu(hctx);
+
+	if (likely(cpu >= 0))
+		kblockd_mod_delayed_work_on(cpu, &hctx->run_work,
+					    msecs_to_jiffies(msecs));
+	else
+		blk_mq_hctx_deactivate(hctx);
 }
 
 /**
-- 
2.20.1
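
For context: previously blk_mq_hctx_next_cpu() fell back to
WORK_CPU_UNBOUND when none of the hctx's mapped CPUs were online, so the
run_work was still queued on some arbitrary CPU. Returning -1 instead lets
__blk_mq_delay_run_hw_queue() distinguish the all-offline case and
deactivate the hctx rather than dispatch from a foreign CPU. Below is a
minimal, self-contained userspace sketch of that contract; every name in
it (model_hctx, model_next_cpu, the bitmask cpumask) is an illustrative
stand-in rather than a kernel API, and clearing "active" is only a
placeholder for blk_mq_hctx_deactivate(), whose body is introduced
elsewhere in the series.

/*
 * Simplified model of the new blk_mq_hctx_next_cpu() contract:
 * round-robin over the CPUs mapped to a hw queue, returning -1 once
 * every mapped CPU is offline so the caller can deactivate the hctx
 * instead of queueing work. Hypothetical names; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct model_hctx {
	unsigned int cpumask;	/* CPUs mapped to this hw queue */
	int next_cpu;		/* round-robin cursor */
	bool active;
};

static unsigned int online_mask = 0x0f;	/* pretend CPUs 0-3 are online */

/* Return the next online mapped CPU, or -1 if none remain online. */
static int model_next_cpu(struct model_hctx *hctx)
{
	int i, cpu;

	for (i = 1; i <= NR_CPUS; i++) {
		cpu = (hctx->next_cpu + i) % NR_CPUS;
		if ((hctx->cpumask & (1u << cpu)) &&
		    (online_mask & (1u << cpu))) {
			hctx->next_cpu = cpu;
			return cpu;
		}
	}
	return -1;	/* all mapped CPUs offline: caller must deactivate */
}

static void model_run_hw_queue(struct model_hctx *hctx)
{
	int cpu = model_next_cpu(hctx);

	if (cpu >= 0) {
		printf("run hctx on CPU %d\n", cpu);
	} else {
		/* stands in for blk_mq_hctx_deactivate(hctx) */
		hctx->active = false;
		printf("all mapped CPUs offline, hctx deactivated\n");
	}
}

int main(void)
{
	struct model_hctx hctx = {
		.cpumask = 0x06,	/* mapped to CPUs 1 and 2 */
		.next_cpu = 0,
		.active = true,
	};

	model_run_hw_queue(&hctx);	/* picks CPU 1 */
	online_mask = 0;		/* simulate hot-unplugging everything */
	model_run_hw_queue(&hctx);	/* now takes the deactivate path */
	return 0;
}

The key point the sketch mirrors is that the -1 sentinel moves the "no
usable CPU" decision out of the CPU selector and into the run path, where
the hctx can be deactivated at the moment the condition is actually
observed, including after requests were requeued during hotplug.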