From: Federico Gavioli <f.gavioli97@xxxxxxxxx>

Retrieve the independent_access_ranges structure from the request_queue
and copy its contents into the queue's bfq_data. The copy is done at
queue initialization.

The access ranges are copied into the bfq_data so that they can be read
without taking the queue lock on every access.

This implementation puts a limit on the maximum number of independent
ranges supported by the scheduler, namely the constant
BFQ_MAX_ACTUATORS. The limit avoids dynamic memory allocation.

Co-developed-by: Rory Chen <rory.c.chen@xxxxxxxxxxx>
Signed-off-by: Rory Chen <rory.c.chen@xxxxxxxxxxx>
Signed-off-by: Federico Gavioli <f.gavioli97@xxxxxxxxx>
Signed-off-by: Paolo Valente <paolo.valente@xxxxxxxxxx>
---
 block/bfq-iosched.c | 54 ++++++++++++++++++++++++++++++++++++++-------
 block/bfq-iosched.h |  5 +++++
 2 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c94b80e3f685..106c8820cc5c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1831,10 +1831,26 @@ static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
 /* get the index of the actuator that will serve bio */
 static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio)
 {
-        /*
-         * Multi-actuator support not complete yet, so always return 0
-         * for the moment.
-         */
+        struct blk_independent_access_range *iar;
+        unsigned int i;
+        sector_t end;
+
+        /* no search needed if one or zero ranges present */
+        if (bfqd->num_actuators < 2)
+                return 0;
+
+        /* bio_end_sector(bio) gives the sector after the last one */
+        end = bio_end_sector(bio) - 1;
+
+        for (i = 0; i < bfqd->num_actuators; i++) {
+                iar = &(bfqd->ia_ranges[i]);
+                if (end >= iar->sector && end < iar->sector + iar->nr_sectors)
+                        return i;
+        }
+
+        WARN_ONCE(true,
+                  "bfq_actuator_index: bio sector out of ranges: end=%llu\n",
+                  end);
         return 0;
 }
 
@@ -2479,7 +2495,6 @@ static void bfq_remove_request(struct request_queue *q,
 
         if (rq->cmd_flags & REQ_META)
                 bfqq->meta_pending--;
-
 }
 
 static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
@@ -7144,6 +7159,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 {
         struct bfq_data *bfqd;
         struct elevator_queue *eq;
+        unsigned int i;
+        struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
 
         eq = elevator_alloc(q, e);
         if (!eq)
@@ -7187,10 +7204,31 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
         bfqd->queue = q;
 
         /*
-         * Multi-actuator support not complete yet, default to single
-         * actuator for the moment.
+         * If the disk supports multiple actuators, copy the independent
+         * access ranges from the request queue structure.
          */
-        bfqd->num_actuators = 1;
+        spin_lock_irq(&q->queue_lock);
+        if (ia_ranges) {
+                /*
+                 * Check if the disk ia_ranges size exceeds the current bfq
+                 * actuator limit.
+                 */
+                if (ia_ranges->nr_ia_ranges > BFQ_MAX_ACTUATORS) {
+                        pr_crit("nr_ia_ranges higher than act limit: iars=%d, max=%d.\n",
+                                ia_ranges->nr_ia_ranges, BFQ_MAX_ACTUATORS);
+                        pr_crit("Falling back to single actuator mode.\n");
+                        bfqd->num_actuators = 1;
+                } else {
+                        bfqd->num_actuators = ia_ranges->nr_ia_ranges;
+
+                        for (i = 0; i < bfqd->num_actuators; i++)
+                                bfqd->ia_ranges[i] = ia_ranges->ia_range[i];
+                }
+        } else {
+                bfqd->num_actuators = 1;
+        }
+
+        spin_unlock_irq(&q->queue_lock);
 
         INIT_LIST_HEAD(&bfqd->dispatch);
 
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index f1c2e77cbf9a..90130a893c8f 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -811,6 +811,11 @@ struct bfq_data {
          */
         unsigned int num_actuators;
 
+        /*
+         * Disk independent access ranges for each actuator
+         * in this device.
+         */
+        struct blk_independent_access_range ia_ranges[BFQ_MAX_ACTUATORS];
 };
 
 enum bfqq_state_flags {
-- 
2.20.1
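
For anyone who wants to see the mapping outside the kernel, below is a
minimal user-space sketch of the range search that bfq_actuator_index()
performs above. The two ranges and their sizes are made-up example
values, not taken from any real device:

/*
 * Standalone illustration (not kernel code) of mapping the last
 * sector touched by a request to the independent access range, and
 * hence the actuator, that contains it.
 */
#include <stdio.h>
#include <stdint.h>

struct access_range {
        uint64_t sector;        /* first sector of the range */
        uint64_t nr_sectors;    /* number of sectors in the range */
};

/* Hypothetical dual-actuator disk split into two equal LBA ranges. */
static const struct access_range ranges[] = {
        { .sector = 0,        .nr_sectors = 10000000 },
        { .sector = 10000000, .nr_sectors = 10000000 },
};
static const unsigned int nr_ranges = 2;

static unsigned int actuator_index(uint64_t last_sector)
{
        unsigned int i;

        /* no search needed with fewer than two ranges */
        if (nr_ranges < 2)
                return 0;

        for (i = 0; i < nr_ranges; i++) {
                const struct access_range *r = &ranges[i];

                if (last_sector >= r->sector &&
                    last_sector < r->sector + r->nr_sectors)
                        return i;
        }

        /* out of every range: fall back to actuator 0, as the patch does */
        return 0;
}

int main(void)
{
        printf("sector 4096     -> actuator %u\n", actuator_index(4096));
        printf("sector 15000000 -> actuator %u\n", actuator_index(15000000));
        return 0;
}

On kernels that export them, the ranges copied by bfq_init_queue() should
match what is reported under
/sys/block/<disk>/queue/independent_access_ranges/<n>/sector and
nr_sectors, which is a quick way to check what the scheduler will see.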