From: Jan Kara <jack@xxxxxxx>

commit 5f550ede5edf846ecc0067be1ba80514e6fe7f8e upstream.

We call bfq_init_rq() from request merging functions where requests we
get should have already gone through bfq_init_rq() during insert, and
in any case we want to do anything only if the request is already
tracked by BFQ. So replace calls to bfq_init_rq() with RQ_BFQQ() to
simply skip requests untracked by BFQ. We move the bfq_init_rq() call
in bfq_insert_request() a bit earlier so that it covers request merging
and we can thus transfer the FIFO position in case of a merge.

CC: stable@xxxxxxxxxxxxxxx
Tested-by: "yukuai (C)" <yukuai3@xxxxxxxxxx>
Signed-off-by: Jan Kara <jack@xxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Link: https://lore.kernel.org/r/20220401102752.8599-6-jack@xxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 block/bfq-iosched.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2267,8 +2267,6 @@ static int bfq_request_merge(struct requ
 	return ELEVATOR_NO_MERGE;
 }
 
-static struct bfq_queue *bfq_init_rq(struct request *rq);
-
 static void bfq_request_merged(struct request_queue *q, struct request *req,
 			       enum elv_merge type)
 {
@@ -2277,7 +2275,7 @@ static void bfq_request_merged(struct re
 	    blk_rq_pos(req) <
 	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
 				    struct request, rb_node))) {
-		struct bfq_queue *bfqq = bfq_init_rq(req);
+		struct bfq_queue *bfqq = RQ_BFQQ(req);
 		struct bfq_data *bfqd;
 		struct request *prev, *next_rq;
 
@@ -2329,8 +2327,8 @@ static void bfq_request_merged(struct re
 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 				struct request *next)
 {
-	struct bfq_queue *bfqq = bfq_init_rq(rq),
-		*next_bfqq = bfq_init_rq(next);
+	struct bfq_queue *bfqq = RQ_BFQQ(rq),
+		*next_bfqq = RQ_BFQQ(next);
 
 	if (!bfqq)
 		return;
@@ -5518,6 +5516,8 @@ static inline void bfq_update_insert_sta
 					   unsigned int cmd_flags) {}
 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
 
+static struct bfq_queue *bfq_init_rq(struct request *rq);
+
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			       bool at_head)
 {
@@ -5532,6 +5532,7 @@ static void bfq_insert_request(struct bl
 	bfqg_stats_update_legacy_io(q, rq);
 #endif
 	spin_lock_irq(&bfqd->lock);
+	bfqq = bfq_init_rq(rq);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
 		spin_unlock_irq(&bfqd->lock);
 		return;
@@ -5539,7 +5540,6 @@ static void bfq_insert_request(struct bl
 
 	blk_mq_sched_request_inserted(rq);
 
-	bfqq = bfq_init_rq(rq);
 	if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &bfqd->dispatch);
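
For context on the accessor the patch switches to: BFQ stores the owning
bfq_queue pointer in the request's elevator private data when bfq_init_rq()
runs at insert time, so a NULL RQ_BFQQ() simply means BFQ is not tracking the
request and the merge hooks can return early. A minimal sketch of that
accessor follows, assuming the definition used in block/bfq-iosched.c; it is
shown only as a reading aid, not as part of the patch:

/* Assumed accessor from block/bfq-iosched.c: bfq_init_rq() stores the
 * bfq_queue a request belongs to in rq->elv.priv[1]; a NULL value means
 * BFQ never saw the request at insert time, so the merge hooks bail out.
 */
#define RQ_BFQQ(rq)	((rq)->elv.priv[1])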