On 28/07/2020 15:50, Ming Lei wrote: [...] > -static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) > - __acquires(hctx->srcu) > +static void hctx_lock(struct blk_mq_hw_ctx *hctx) > { > - if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { > - /* shut up gcc false positive */ > - *srcu_idx = 0; > + if (!(hctx->flags & BLK_MQ_F_BLOCKING)) > rcu_read_lock(); > - } else > - *srcu_idx = srcu_read_lock(hctx->srcu); > + else > + percpu_ref_get(&hctx->queue->dispatch_counter); > } I quite like this because it hides the internals of hctx_lock() (rcu vs srcu) from the callers. > /** > @@ -1486,8 +1479,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, > */ > static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) > { > - int srcu_idx; > - > /* > * We should be running this queue from one of the CPUs that > * are mapped to it. > @@ -1521,9 +1512,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) > > might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); > > - hctx_lock(hctx, &srcu_idx); > + hctx_lock(hctx); > blk_mq_sched_dispatch_requests(hctx); > - hctx_unlock(hctx, srcu_idx); > + hctx_unlock(hctx); blk_mq_sched_dispatch_requests also has this comment at the beginning: /* RCU or SRCU read lock is needed before checking quiesced flag */ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) I think the SRCU part of that comment needs to be updated to mention percpu_ref, or the whole comment should probably just say that hctx_lock() must be held.