A later patch will preserve the order of pipelined zoned writes by
submitting all zoned writes for a zone to the same software queue as the
previously submitted zoned writes for that zone. Hence add support for
allocating a request from a specific software queue.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Damien Le Moal <dlemoal@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-mq.c | 18 ++++++++++++++----
 block/blk-mq.h |  1 +
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 666e6e6ba143..4262c85be206 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -495,6 +495,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
+	int swq_cpu = data->swq_cpu;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
 	unsigned int tag;
@@ -507,7 +508,8 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
 retry:
-	data->ctx = blk_mq_get_ctx(q);
+	data->swq_cpu = swq_cpu >= 0 ? swq_cpu : raw_smp_processor_id();
+	data->ctx = __blk_mq_get_ctx(q, data->swq_cpu);
 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 
 	if (q->elevator) {
@@ -587,6 +589,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
 		.cmd_flags	= opf,
 		.nr_tags	= plug->nr_ios,
 		.cached_rqs	= &plug->cached_rqs,
+		.swq_cpu	= -1,
 	};
 	struct request *rq;
 
@@ -648,6 +651,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
 		.flags		= flags,
 		.cmd_flags	= opf,
 		.nr_tags	= 1,
+		.swq_cpu	= -1,
 	};
 	int ret;
 
@@ -2964,12 +2968,14 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 }
 
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
+					       int swq_cpu,
 					       struct blk_plug *plug,
 					       struct bio *bio,
 					       unsigned int nsegs)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
+		.swq_cpu	= swq_cpu,
 		.nr_tags	= 1,
 		.cmd_flags	= bio->bi_opf,
 	};
@@ -2993,7 +2999,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
  * Check if there is a suitable cached request and return it.
  */
 static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
-		struct request_queue *q, blk_opf_t opf)
+		struct request_queue *q,
+		int swq_cpu, blk_opf_t opf)
 {
 	enum hctx_type type = blk_mq_get_hctx_type(opf);
 	struct request *rq;
@@ -3003,6 +3010,8 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 	rq = rq_list_peek(&plug->cached_rqs);
 	if (!rq || rq->q != q)
 		return NULL;
+	if (swq_cpu >= 0 && rq->mq_ctx->cpu != swq_cpu)
+		return NULL;
 	if (type != rq->mq_hctx->type &&
 	    (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
 		return NULL;
@@ -3061,6 +3070,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int nr_segs;
 	struct request *rq;
+	int swq_cpu = -1;
 	blk_status_t ret;
 
 	/*
@@ -3109,7 +3119,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		goto queue_exit;
 
 new_request:
-	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
+	rq = blk_mq_peek_cached_request(plug, q, swq_cpu, bio->bi_opf);
 	if (rq) {
 		blk_mq_use_cached_rq(rq, plug, bio);
 		/*
@@ -3119,7 +3129,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		 */
 		blk_queue_exit(q);
 	} else {
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		rq = blk_mq_get_new_requests(q, swq_cpu, plug, bio, nr_segs);
 		if (unlikely(!rq)) {
 			if (bio->bi_opf & REQ_NOWAIT)
 				bio_wouldblock_error(bio);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 44979e92b79f..d5536dcf2182 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -158,6 +158,7 @@ struct blk_mq_alloc_data {
 	struct rq_list *cached_rqs;
 
 	/* input & output parameter */
+	int swq_cpu;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_hw_ctx *hctx;
 };
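
A sketch for illustration only, not part of this patch: with the new
swq_cpu field, a caller that wants the request allocated from the
software queue of a particular CPU sets data->swq_cpu to that CPU before
allocating, while -1 keeps the existing behavior of using the submitting
CPU. The snippet below shows one way a later patch in this series might
pick swq_cpu for zoned writes before the allocation in
blk_mq_submit_bio(); blk_zone_write_cpu() is a hypothetical helper,
assumed here to return the CPU of the software queue that received the
previous write for the same zone, or -1 if there is none.

/*
 * Hypothetical helper, not part of this patch: CPU of the software queue
 * that received the previous write for the zone of @bio, or -1.
 */
int blk_zone_write_cpu(struct request_queue *q, struct bio *bio);

static int blk_mq_bio_swq_cpu(struct request_queue *q, struct bio *bio)
{
	/* Non-zoned and non-write bios keep the default behavior. */
	if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
		return -1;

	/*
	 * Send this write to the same software queue as the previously
	 * submitted writes for the same zone so that pipelined zoned
	 * writes stay ordered.
	 */
	return blk_zone_write_cpu(q, bio);
}

blk_mq_submit_bio() would then initialize its local swq_cpu variable from
such a helper instead of leaving it at -1.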