Even if we have multiple queues in the plug list, the chances that they
are heavily interspersed are minimal. Don't bother spending CPU cycles
sorting the list.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 block/blk-mq.c | 19 -------------------
 1 file changed, 19 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d957b6812a98..58774267dd95 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -19,7 +19,6 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/llist.h>
-#include <linux/list_sort.h>
 #include <linux/cpu.h>
 #include <linux/cache.h>
 #include <linux/sched/sysctl.h>
@@ -2151,20 +2150,6 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	spin_unlock(&ctx->lock);
 }
 
-static int plug_rq_cmp(void *priv, const struct list_head *a,
-		       const struct list_head *b)
-{
-	struct request *rqa = container_of(a, struct request, queuelist);
-	struct request *rqb = container_of(b, struct request, queuelist);
-
-	if (rqa->mq_ctx != rqb->mq_ctx)
-		return rqa->mq_ctx > rqb->mq_ctx;
-	if (rqa->mq_hctx != rqb->mq_hctx)
-		return rqa->mq_hctx > rqb->mq_hctx;
-
-	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
-}
-
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	LIST_HEAD(list);
@@ -2172,10 +2157,6 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	if (list_empty(&plug->mq_list))
 		return;
 	list_splice_init(&plug->mq_list, &list);
-
-	if (plug->rq_count > 2 && plug->multiple_queues)
-		list_sort(NULL, &list, plug_rq_cmp);
-
 	plug->rq_count = 0;
 
 	do {
-- 
2.33.1
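
For readers skimming the diff: the removed plug_rq_cmp() implemented a
three-level sort key over the plugged requests, grouping by software queue
(mq_ctx), then hardware queue (mq_hctx), then ordering by starting sector
(blk_rq_pos()). Below is a minimal userspace sketch of that comparator
pattern using qsort(3); struct mock_rq and all values in it are made up
for illustration, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct request with just the fields the
 * removed comparator looked at. */
struct mock_rq {
	const void *mq_ctx;		/* per-CPU software queue */
	const void *mq_hctx;		/* hardware dispatch queue */
	unsigned long long sector;	/* blk_rq_pos() equivalent */
};

/* Three-way comparator mirroring plug_rq_cmp(): group by ctx, then
 * hctx, then order by sector.  As in the kernel code, the pointer
 * comparisons only serve to cluster requests from the same queue;
 * the relative order of different queues is arbitrary. */
static int mock_rq_cmp(const void *pa, const void *pb)
{
	const struct mock_rq *a = pa, *b = pb;

	if (a->mq_ctx != b->mq_ctx)
		return a->mq_ctx > b->mq_ctx ? 1 : -1;
	if (a->mq_hctx != b->mq_hctx)
		return a->mq_hctx > b->mq_hctx ? 1 : -1;
	return (a->sector > b->sector) - (a->sector < b->sector);
}

int main(void)
{
	static int ctx0, ctx1, hctx0;	/* dummy queue identities */
	struct mock_rq rqs[] = {
		{ &ctx1, &hctx0, 2048 },
		{ &ctx0, &hctx0, 4096 },
		{ &ctx0, &hctx0,  512 },
	};
	size_t i, n = sizeof(rqs) / sizeof(rqs[0]);

	qsort(rqs, n, sizeof(rqs[0]), mock_rq_cmp);
	for (i = 0; i < n; i++)
		printf("ctx=%p hctx=%p sector=%llu\n",
		       (void *)rqs[i].mq_ctx, (void *)rqs[i].mq_hctx,
		       rqs[i].sector);
	return 0;
}

One difference worth noting: the kernel's list_sort() is stable and only
inspects the sign of the comparator's return value, which is why
plug_rq_cmp() could get away with returning the 0/1 result of a boolean
comparison, whereas qsort(3) needs a full negative/zero/positive result.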