The percentage of I/O latencies completed within 750 nsec increases from
97.186% to 99.324% on average; as a rough estimate, the write latency
improves (decreases) by around 2.138%. This is quite an ideal result; I
believe that on slow hard drives such a small code-path optimization will
be overwhelmed by hardware latency and be hard to notice.

This patch goes back to binary search when the linear device grows and the
conf->disks array can no longer be placed within a single cache line.

Although the improvement is tiny in most cases, it is good to have since
we don't pay any other cost.

Signed-off-by: Coly Li <colyli@xxxxxxxxxx>
Cc: Song Liu <song@xxxxxxxxxx>
Cc: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 drivers/md/md-linear.c | 44 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index a382929ce7ba..c158b74371b1 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -25,10 +25,12 @@ struct linear_conf {
 	struct dev_info		disks[] __counted_by(raid_disks);
 };
 
+static int prefer_linear_dev_search;
+
 /*
  * find which device holds a particular offset
  */
-static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+static inline struct dev_info *__which_dev(struct mddev *mddev, sector_t sector)
 {
 	int lo, mid, hi;
 	struct linear_conf *conf;
@@ -53,6 +55,33 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 	return conf->disks + lo;
 }
 
+/*
+ * If conf->disks[] can be held within an L1 cache line,
+ * linear search is faster than binary search.
+ */
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+{
+	int i;
+
+	if (prefer_linear_dev_search) {
+		struct linear_conf *conf;
+		struct dev_info *dev;
+		int max;
+
+		conf = mddev->private;
+		dev = conf->disks;
+		max = conf->raid_disks;
+		for (i = 0; i < max; i++, dev++) {
+			if (sector < dev->end_sector)
+				return dev;
+		}
+	}
+
+	/* slow path */
+	return __which_dev(mddev, sector);
+}
+
+
 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	struct linear_conf *conf;
@@ -222,6 +251,18 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
 	kfree_rcu(oldconf, rcu);
+
+	/*
+	 * When linear_conf->disks[] grows large enough, set
+	 * prefer_linear_dev_search to 0 to indicate that linear search
+	 * in which_dev() is no longer optimal. The slow path in
+	 * __which_dev() might be faster.
+	 */
+	if ((mddev->raid_disks * sizeof(struct dev_info)) >
+	     cache_line_size() &&
+	    prefer_linear_dev_search == 1)
+		prefer_linear_dev_search = 0;
+
 	return 0;
 }
 
@@ -337,6 +378,7 @@ static struct md_personality linear_personality = {
 
 static int __init linear_init(void)
 {
+	prefer_linear_dev_search = 1;
 	return register_md_personality(&linear_personality);
 }
 
-- 
2.48.1
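
For illustration only, here is a minimal userspace sketch of the cache-line
threshold that linear_add() checks above. It is not part of the patch:
"struct dev_info_stub" is a hypothetical stand-in for the kernel's
struct dev_info, and sysconf(_SC_LEVEL1_DCACHE_LINESIZE) stands in for the
in-kernel cache_line_size() helper.

	/*
	 * Illustrative sketch: decide whether a linear scan of disks[]
	 * stays within one L1 cache line, mirroring the check in
	 * linear_add(). Sizes here are assumptions, not the real driver's.
	 */
	#include <stdio.h>
	#include <unistd.h>

	struct dev_info_stub {
		void *rdev;
		unsigned long long end_sector;
	};

	int main(void)
	{
		long line_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
		long raid_disks = 4;	/* example element count in disks[] */

		if (line_size <= 0)
			line_size = 64;	/* common L1 line size as a fallback */

		if (raid_disks * (long)sizeof(struct dev_info_stub) <= line_size)
			printf("disks[] fits in one cache line: prefer linear search\n");
		else
			printf("disks[] spans cache lines: prefer binary search\n");

		return 0;
	}

The same comparison drives the fast/slow path switch in the patch: once
disks[] no longer fits in a single cache line, binary search is preferred
again.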