The kernel parameters `isolcpus=` and `nohz_full=` are used to isolate
CPUs for specific workloads, and users typically do not want block IO to
disturb those CPUs; long IO latency may also be caused if a blk-mq
kworker is scheduled on one of these isolated CPUs. The kernel workqueue
subsystem only respects this isolation for WQ_UNBOUND workqueues; for
bound workqueues, the responsibility falls on the workqueue user.

So avoid running block kworkers on isolated CPUs by ruling isolated CPUs
out of hctx->cpumask. Meanwhile, in the cpuhp handler, use the queue map
to check whether all CPUs in a hw queue are offline; this way avoids any
cost in the fast IO code path.

Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Andrew Theurer <atheurer@xxxxxxxxxx>
Cc: Joe Mario <jmario@xxxxxxxxxx>
Cc: Sebastian Jug <sejug@xxxxxxxxxx>
Cc: Frederic Weisbecker <frederic@xxxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
V2:
	- remove the module parameter; meanwhile use the queue map to
	  check if all CPUs in one hctx are offline

 block/blk-mq.c | 42 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec922c6bccbe..91055bdc4426 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -29,6 +29,7 @@
 #include <linux/prefetch.h>
 #include <linux/blk-crypto.h>
 #include <linux/part_stat.h>
+#include <linux/sched/isolation.h>
 
 #include <trace/events/block.h>
 
@@ -3476,14 +3477,27 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
 	return data.has_rq;
 }
 
-static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
-		struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx)
 {
-	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
-		return false;
-	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
-		return false;
-	return true;
+	struct blk_mq_tag_set *tag_set = hctx->queue->tag_set;
+	int cpu;
+
+	/*
+	 * hctx->cpumask has ruled out isolated CPUs, but userspace still
+	 * might submit IOs on these isolated CPUs, so use the queue map to
+	 * check if all CPUs mapped to this hctx are offline
+	 */
+	for_each_possible_cpu(cpu) {
+		unsigned idx = tag_set->map[hctx->type].mq_map[cpu];
+
+		if (idx != hctx->queue_num)
+			continue;
+
+		if (cpu_online(cpu))
+			return true;
+	}
+
+	return false;
 }
 
 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
@@ -3491,8 +3505,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
-	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
-	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
+	if (blk_mq_hctx_has_online_cpu(hctx))
 		return 0;
 
 	/*
@@ -3900,6 +3913,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
+		int cpu;
+
 		/*
 		 * If no software queues are mapped to this hardware queue,
 		 * disable it and free the request entries.
@@ -3926,6 +3941,15 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 */
 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
 
+		/*
+		 * Rule out isolated CPUs from hctx->cpumask, to avoid
+		 * running the wq worker on an isolated CPU
+		 */
+		for_each_cpu(cpu, hctx->cpumask) {
+			if (cpu_is_isolated(cpu))
+				cpumask_clear_cpu(cpu, hctx->cpumask);
+		}
+
 		/*
 		 * Initialize batch roundrobin counts
 		 */
-- 
2.41.0
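
For illustration only, here is a minimal stand-alone sketch (plain
userspace C, not kernel code) of why the cpuhp handler above walks the
queue map rather than hctx->cpumask. The 8-CPU/4-hctx layout and the
isolcpus=6,7 setup are made-up example values; mq_map[],
cpu_online_tbl[] and cpu_isolated[] are hypothetical stand-ins for
tag_set->map[type].mq_map[], cpu_online() and cpu_is_isolated():

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8
#define NR_HCTX 4

/* Hypothetical state: 8 CPUs, 4 hw queues, CPUs 6-7 isolated */
static const unsigned int mq_map[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const bool cpu_online_tbl[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
static const bool cpu_isolated[NR_CPUS]   = { 0, 0, 0, 0, 0, 0, 1, 1 };

/* hctx->cpumask modeled as plain bool arrays */
static bool hctx_cpumask[NR_HCTX][NR_CPUS];

/*
 * Mirrors the blk_mq_map_swqueue() change: isolated CPUs are ruled
 * out of the per-hctx cpumask so no bound kworker runs on them.
 */
static void build_hctx_masks(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (!cpu_isolated[cpu])
			hctx_cpumask[mq_map[cpu]][cpu] = true;
}

/*
 * Mirrors blk_mq_hctx_has_online_cpu(): walk the queue map, so
 * isolated-but-online CPUs, which may still submit IO, keep the
 * hctx counted as online.
 */
static bool hctx_has_online_cpu(unsigned int queue_num)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] != queue_num)
			continue;
		if (cpu_online_tbl[cpu])
			return true;
	}
	return false;
}

int main(void)
{
	build_hctx_masks();

	/*
	 * hctx 3 maps only the isolated CPUs 6 and 7: its cpumask ends
	 * up empty, yet the queue-map walk still sees it as online.
	 */
	for (unsigned int q = 0; q < NR_HCTX; q++) {
		int weight = 0;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			weight += hctx_cpumask[q][cpu];
		printf("hctx %u: cpumask weight %d, %s\n", q, weight,
		       hctx_has_online_cpu(q) ? "online" : "all CPUs offline");
	}
	return 0;
}

With all CPUs online this prints a cpumask weight of 0 for hctx 3 while
still reporting it online; that is exactly the case a hotplug check
based on hctx->cpumask would get wrong once isolated CPUs are ruled out
of the mask.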