Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 block/blk-mq-sched.c | 19 +++----------------
 block/blk-mq.c       | 18 +++++++++++-------
 block/blk-mq.h       | 44 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 58 insertions(+), 23 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 112270961af0..8ff74efe4172 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -131,19 +131,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 * If we have previous entries on our dispatch list, grab them first for
 	 * more fair dispatch.
 	 */
-	if (!list_empty_careful(&hctx->dispatch)) {
-		spin_lock(&hctx->lock);
-		if (!list_empty(&hctx->dispatch)) {
-			list_splice_init(&hctx->dispatch, &rq_list);
-
-			/*
-			 * BUSY won't be cleared until all requests
-			 * in hctx->dispatch are dispatched successfully
-			 */
-			blk_mq_hctx_set_busy(hctx);
-		}
-		spin_unlock(&hctx->lock);
-	}
+	if (blk_mq_has_dispatch_rqs(hctx))
+		blk_mq_take_list_from_dispatch(hctx, &rq_list);
 
 	/*
 	 * Only ask the scheduler for requests, if we didn't have residual
@@ -296,9 +285,7 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	 * If we already have a real request tag, send directly to
 	 * the dispatch list.
 	 */
-	spin_lock(&hctx->lock);
-	list_add(&rq->queuelist, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
+	blk_mq_add_rq_to_dispatch(hctx, rq);
 
 	return true;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index db635ef06a72..785145f60c1d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -63,7 +63,7 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
 	return sbitmap_any_bit_set(&hctx->ctx_map) ||
-			!list_empty_careful(&hctx->dispatch) ||
+			blk_mq_has_dispatch_rqs(hctx) ||
 			blk_mq_sched_has_work(hctx);
 }
 
@@ -1097,9 +1097,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 		rq = list_first_entry(list, struct request, queuelist);
 		blk_mq_put_driver_tag(rq);
 
-		spin_lock(&hctx->lock);
-		list_splice_init(list, &hctx->dispatch);
-		spin_unlock(&hctx->lock);
+		blk_mq_add_list_to_dispatch(hctx, list);
 
 		/*
 		 * If SCHED_RESTART was set by the caller of this function and
@@ -1874,9 +1872,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	if (list_empty(&tmp))
 		return 0;
 
-	spin_lock(&hctx->lock);
-	list_splice_tail_init(&tmp, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
+	blk_mq_add_list_to_dispatch_tail(hctx, &tmp);
 
 	blk_mq_run_hw_queue(hctx, true);
 	return 0;
@@ -1926,6 +1922,13 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	}
 }
 
+static void blk_mq_init_dispatch(struct request_queue *q,
+		struct blk_mq_hw_ctx *hctx)
+{
+	spin_lock_init(&hctx->lock);
+	INIT_LIST_HEAD(&hctx->dispatch);
+}
+
 static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -1939,6 +1942,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
+	blk_mq_init_dispatch(q, hctx);
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d9f875093613..2ed355881996 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -150,4 +150,48 @@ static inline void blk_mq_hctx_clear_busy(struct blk_mq_hw_ctx *hctx)
 	clear_bit(BLK_MQ_S_BUSY, &hctx->state);
 }
 
+static inline bool blk_mq_has_dispatch_rqs(struct blk_mq_hw_ctx *hctx)
+{
+	return !list_empty_careful(&hctx->dispatch);
+}
+
+static inline void blk_mq_add_rq_to_dispatch(struct blk_mq_hw_ctx *hctx,
+		struct request *rq)
+{
+	spin_lock(&hctx->lock);
+	list_add(&rq->queuelist, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
+}
+
+static inline void blk_mq_add_list_to_dispatch(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list)
+{
+	spin_lock(&hctx->lock);
+	list_splice_init(list, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
+}
+
+static inline void blk_mq_add_list_to_dispatch_tail(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list)
+{
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(list, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
+}
+
+static inline void blk_mq_take_list_from_dispatch(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list)
+{
+	spin_lock(&hctx->lock);
+	list_splice_init(&hctx->dispatch, list);
+
+	/*
+	 * BUSY won't be cleared until all requests
+	 * in hctx->dispatch are dispatched successfully
+	 */
+	if (!list_empty(list))
+		blk_mq_hctx_set_busy(hctx);
+	spin_unlock(&hctx->lock);
+}
+
 #endif
-- 
2.9.4
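
For illustration only, separate from the patch itself: a minimal, self-contained userspace sketch of the pattern that blk_mq_take_list_from_dispatch() centralizes, namely splicing the per-hctx dispatch list onto a caller-provided list under the lock and marking the hctx BUSY only when something was actually taken. The struct model_hctx, the tiny list helpers, and the pthread mutex standing in for hctx->lock are assumptions made for the sketch; they are not kernel APIs.

/*
 * Userspace model of the "take list from dispatch" pattern used by
 * blk_mq_take_list_from_dispatch(): a pthread mutex stands in for
 * hctx->lock, a minimal circular list for struct list_head, and a
 * bool for the BLK_MQ_S_BUSY bit.  All names here are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *l)
{
	l->prev = l->next = l;
}

static bool list_empty(const struct list_node *l)
{
	return l->next == l;
}

static void list_add_tail(struct list_node *n, struct list_node *l)
{
	n->prev = l->prev;
	n->next = l;
	l->prev->next = n;
	l->prev = n;
}

/* Move all entries of @list to the front of @head and reinit @list. */
static void list_splice_init(struct list_node *list, struct list_node *head)
{
	if (list_empty(list))
		return;
	list->prev->next = head->next;
	head->next->prev = list->prev;
	head->next = list->next;
	list->next->prev = head;
	list_init(list);
}

struct model_hctx {
	pthread_mutex_t lock;		/* stands in for hctx->lock */
	struct list_node dispatch;	/* stands in for hctx->dispatch */
	bool busy;			/* stands in for BLK_MQ_S_BUSY */
};

/*
 * Mirrors blk_mq_take_list_from_dispatch(): splice ->dispatch onto a
 * local list under the lock; mark the hctx busy only if requests were
 * actually taken, so the flag is never set on an empty grab.
 */
static void model_take_from_dispatch(struct model_hctx *h,
		struct list_node *list)
{
	pthread_mutex_lock(&h->lock);
	list_splice_init(&h->dispatch, list);
	if (!list_empty(list))
		h->busy = true;
	pthread_mutex_unlock(&h->lock);
}

int main(void)
{
	struct model_hctx h = { .busy = false };
	struct list_node local, rq;

	pthread_mutex_init(&h.lock, NULL);
	list_init(&h.dispatch);
	list_init(&local);
	list_init(&rq);

	list_add_tail(&rq, &h.dispatch);	/* queue one "request" */
	model_take_from_dispatch(&h, &local);

	printf("took requests: %s, busy: %s\n",
	       list_empty(&local) ? "no" : "yes",
	       h.busy ? "yes" : "no");
	return 0;
}

Setting the flag only when the local list turns out non-empty mirrors the open-coded logic the patch removes from blk_mq_sched_dispatch_requests(), where BUSY is not cleared until everything on hctx->dispatch has been dispatched successfully.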