Instead of passing a bool at_head, pass down the full flags from the
blk_mq_insert_request interface.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 block/bfq-iosched.c   | 16 ++++++++--------
 block/blk-mq.c        |  5 ++---
 block/elevator.h      |  3 ++-
 block/kyber-iosched.c |  5 +++--
 block/mq-deadline.c   |  9 +++++----
 5 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index d9ed3108c17af6..db1813e7a0dd72 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6232,7 +6232,7 @@ static inline void bfq_update_insert_stats(struct request_queue *q,
 static struct bfq_queue *bfq_init_rq(struct request *rq);
 
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                               bool at_head)
+                               unsigned int flags)
 {
         struct request_queue *q = hctx->queue;
         struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -6255,11 +6255,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
         trace_block_rq_insert(rq);
 
-        if (!bfqq || at_head) {
-                if (at_head)
-                        list_add(&rq->queuelist, &bfqd->dispatch);
-                else
-                        list_add_tail(&rq->queuelist, &bfqd->dispatch);
+        if (flags & BLK_MQ_INSERT_AT_HEAD) {
+                list_add(&rq->queuelist, &bfqd->dispatch);
+        } else if (!bfqq) {
+                list_add_tail(&rq->queuelist, &bfqd->dispatch);
         } else {
                 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
                 /*
@@ -6289,14 +6288,15 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                struct list_head *list, bool at_head)
+                                struct list_head *list,
+                                unsigned int flags)
 {
         while (!list_empty(list)) {
                 struct request *rq;
 
                 rq = list_first_entry(list, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                bfq_insert_request(hctx, rq, at_head);
+                bfq_insert_request(hctx, rq, flags);
         }
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c0330879aff54a..d6fe56e1aadee2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2556,8 +2556,7 @@ static void blk_mq_insert_request(struct request *rq, unsigned int flags)
                 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 
                 list_add(&rq->queuelist, &list);
-                q->elevator->type->ops.insert_requests(hctx, &list,
-                                flags & BLK_MQ_INSERT_AT_HEAD);
+                q->elevator->type->ops.insert_requests(hctx, &list, flags);
         } else {
                 trace_block_rq_insert(rq);
 
@@ -2768,7 +2767,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
         percpu_ref_get(&this_hctx->queue->q_usage_counter);
         if (this_hctx->queue->elevator) {
                 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
-                                &list, false);
+                                &list, 0);
                 blk_mq_run_hw_queue(this_hctx, from_sched);
         } else {
                 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
diff --git a/block/elevator.h b/block/elevator.h
index 774a8f6b99e69e..b80b13f505ad99 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -37,7 +37,8 @@ struct elevator_mq_ops {
         void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
         void (*prepare_request)(struct request *);
         void (*finish_request)(struct request *);
-        void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
+        void (*insert_requests)(struct blk_mq_hw_ctx *hctx, struct list_head *list,
+                        unsigned int flags);
         struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
         bool (*has_work)(struct blk_mq_hw_ctx *);
         void (*completed_request)(struct request *, u64);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 2146969237bfed..4d822d70b01f64 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -590,7 +590,8 @@ static void kyber_prepare_request(struct request *rq)
 }
 
 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
-                                  struct list_head *rq_list, bool at_head)
+                                  struct list_head *rq_list,
+                                  unsigned int flags)
 {
         struct kyber_hctx_data *khd = hctx->sched_data;
         struct request *rq, *next;
@@ -602,7 +603,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
                 spin_lock(&kcq->lock);
                 trace_block_rq_insert(rq);
-                if (at_head)
+                if (flags & BLK_MQ_INSERT_AT_HEAD)
                         list_move(&rq->queuelist, head);
                 else
                         list_move_tail(&rq->queuelist, head);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 6f7a86a70dafb4..90cc4345146e6a 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -768,7 +768,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
  * add rq to rbtree and fifo
  */
 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                              bool at_head)
+                              unsigned int flags)
 {
         struct request_queue *q = hctx->queue;
         struct deadline_data *dd = q->elevator->elevator_data;
@@ -801,7 +801,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
         trace_block_rq_insert(rq);
 
-        if (at_head) {
+        if (flags & BLK_MQ_INSERT_AT_HEAD) {
                 list_add(&rq->queuelist, &per_prio->dispatch);
                 rq->fifo_time = jiffies;
         } else {
@@ -825,7 +825,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
-                               struct list_head *list, bool at_head)
+                               struct list_head *list,
+                               unsigned int flags)
 {
         struct request_queue *q = hctx->queue;
         struct deadline_data *dd = q->elevator->elevator_data;
@@ -836,7 +837,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 
                 rq = list_first_entry(list, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                dd_insert_request(hctx, rq, at_head);
+                dd_insert_request(hctx, rq, flags);
         }
         spin_unlock(&dd->lock);
 }
-- 
2.39.2
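For a scheduler not touched by this patch, the conversion would follow the same
pattern as the kyber and mq-deadline hunks above. Below is a minimal sketch of an
updated ->insert_requests callback; foo_insert_requests, struct foo_data and its
dispatch list are hypothetical names used only for illustration and are not part
of this patch:

/* Hypothetical per-queue scheduler data; only the dispatch list matters here. */
struct foo_data {
        spinlock_t lock;
        struct list_head dispatch;
};

static void foo_insert_requests(struct blk_mq_hw_ctx *hctx,
                                struct list_head *list,
                                unsigned int flags)
{
        struct foo_data *fd = hctx->queue->elevator->elevator_data;

        spin_lock(&fd->lock);
        while (!list_empty(list)) {
                struct request *rq = list_first_entry(list, struct request,
                                                      queuelist);

                list_del_init(&rq->queuelist);
                /* Head insertion is now requested via the insert flags. */
                if (flags & BLK_MQ_INSERT_AT_HEAD)
                        list_add(&rq->queuelist, &fd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &fd->dispatch);
        }
        spin_unlock(&fd->lock);
}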