On 24/04/2020 11:23, Ming Lei wrote:
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
+	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
+			struct blk_mq_hw_ctx, cpuhp_online);
+
+	if (!cpumask_test_cpu(cpu, hctx->cpumask))
+		return 0;
+
+	if ((cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) ||
+	    (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids))
+		return 0;
nit: personally I prefer what we had previously, as it was easier to
read, even if it did cause the code to be indented:
	if ((cpumask_next_and(-1, cpumask, online_mask) == cpu) &&
	    (cpumask_next_and(cpu, cpumask, online_mask) >= nr_cpu_ids)) {
		// do deactivate
	}
	return 0;
and it could avoid the cpumask_test_cpu() test, unless you want that as
an optimisation. If so, a comment could help.
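
Something like the below (untested) is what I had in mind; it keeps the
deactivate path in one place and drops the extra test.
blk_mq_hctx_drain_inflight_rqs() is the helper added by this patch:

static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	/*
	 * Deactivate only when @cpu is the last online CPU mapped to
	 * this hctx, i.e. it is both the first and the last online bit
	 * in hctx->cpumask.
	 */
	if ((cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) == cpu) &&
	    (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) >= nr_cpu_ids)) {
		set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
		smp_mb__after_atomic();
		blk_mq_hctx_drain_inflight_rqs(hctx);
	}

	return 0;
}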
cheers,
John
+
+	/*
+	 * The current CPU is the last one in this hctx, so
+	 * BLK_MQ_S_INACTIVE can be observed in the dispatch path
+	 * without any barrier, since both run on the same CPU.
+	 */
+	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+	/*
+	 * Order setting BLK_MQ_S_INACTIVE against checking rq->tag and
+	 * rqs[tag]; the pairing barrier is the smp_mb() in
+	 * blk_mq_get_driver_tag().
+	 */
+	smp_mb__after_atomic();
+	blk_mq_hctx_drain_inflight_rqs(hctx);
+	return 0;
+}
+
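
For reference, my understanding of the pairing is roughly the below.
This is only a sketch of the dispatch side, not the exact hunk from this
series, and blk_mq_assign_driver_tag() is a hypothetical stand-in for
however the tag actually gets allocated. The point is that either the
dispatch path sees BLK_MQ_S_INACTIVE after taking its tag and gives the
tag back, or the offline path sees the in-flight request when it drains:

static bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* Hypothetical helper: assigns rq->tag from the tag set */
	if (rq->tag == -1 && !blk_mq_assign_driver_tag(rq))
		return false;

	/*
	 * Pairs with smp_mb__after_atomic() in blk_mq_hctx_notify_offline():
	 * order the rq->tag/rqs[tag] stores against reading hctx->state.
	 */
	smp_mb();

	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &hctx->state))) {
		blk_mq_put_driver_tag(rq);
		return false;
	}

	return true;
}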