Replace the at_head bool with a flags argument that so far only contains
a single BLK_MQ_INSERT_AT_HEAD value.  This makes it much easier to grep
for head insertions into the blk-mq dispatch queues.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 block/blk-mq.c | 19 ++++++++++---------
 block/blk-mq.h |  2 ++
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index edc82ecf7f5b77..5a4ae0e4080d45 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
-static void blk_mq_insert_request(struct request *rq, bool at_head);
+static void blk_mq_insert_request(struct request *rq, unsigned int flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
@@ -1310,7 +1310,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 		return;
 	}
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -1373,7 +1373,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	rq->end_io = blk_end_sync_rq;
 	blk_account_io_start(rq);
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 	if (blk_rq_is_poll(rq)) {
@@ -1447,14 +1447,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_insert_request(rq, true);
+			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
 		}
 	}
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 	}
 	blk_mq_run_hw_queues(q, false);
@@ -2509,7 +2509,7 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
 	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
-static void blk_mq_insert_request(struct request *rq, bool at_head)
+static void blk_mq_insert_request(struct request *rq, unsigned int flags)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, at_head);
+		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2556,12 +2556,13 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 		list_add(&rq->queuelist, &list);
-		q->elevator->type->ops.insert_requests(hctx, &list, at_head);
+		q->elevator->type->ops.insert_requests(hctx, &list,
+				flags & BLK_MQ_INSERT_AT_HEAD);
 	} else {
 		trace_block_rq_insert(rq);
 		spin_lock(&ctx->lock);
-		if (at_head)
+		if (flags & BLK_MQ_INSERT_AT_HEAD)
 			list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 		else
 			list_add_tail(&rq->queuelist,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cc17e942753117..ab2f4bfa0de6a4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -30,6 +30,8 @@ struct blk_mq_ctx {
 	struct kobject kobj;
 } ____cacheline_aligned_in_smp;
 
+#define BLK_MQ_INSERT_AT_HEAD	(1U << 0)
+
 void blk_mq_submit_bio(struct bio *bio);
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 		struct io_comp_batch *iob, unsigned int flags);
-- 
2.39.2
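[Editor's note, not part of the patch: a minimal standalone sketch of the
bool-to-flags pattern the commit message describes, compilable as ordinary
userspace C.  The names insert_request and INSERT_AT_HEAD are illustrative
stand-ins, not the kernel API.]

#include <stdio.h>

/* Illustrative flag, mirroring the idea of BLK_MQ_INSERT_AT_HEAD. */
#define INSERT_AT_HEAD	(1U << 0)

/*
 * With a flags word instead of a bare bool, call sites spell out
 * INSERT_AT_HEAD (easy to grep for) and further bits can be added
 * later without changing the function signature.
 */
static void insert_request(int rq, unsigned int flags)
{
	printf("rq %d inserted at %s\n", rq,
	       (flags & INSERT_AT_HEAD) ? "head" : "tail");
}

int main(void)
{
	insert_request(1, INSERT_AT_HEAD);	/* was insert_request(1, true)  */
	insert_request(2, 0);			/* was insert_request(2, false) */
	return 0;
}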