On Fri, Nov 1, 2019 at 7:23 AM <jgq516@xxxxxxxxx> wrote: > > From: Guoqing Jiang <guoqing.jiang@xxxxxxxxxxxxxxx> > > With the new sysfs node, we can use it to control if raid1 array > wants serialization for write request or not. > > Signed-off-by: Guoqing Jiang <guoqing.jiang@xxxxxxxxxxxxxxx> > --- > drivers/md/md.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ > drivers/md/md.h | 1 + > 2 files changed, 47 insertions(+) > > diff --git a/drivers/md/md.c b/drivers/md/md.c > index 43b7da334e4a..8df94a58512b 100644 > --- a/drivers/md/md.c > +++ b/drivers/md/md.c > @@ -174,6 +174,7 @@ void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, > memalloc_noio_restore(noio_flag); > if (!mddev->serial_info_pool) > pr_err("can't alloc memory pool for writemostly\n"); > + mddev->serialize_policy = true; > if (!is_suspend) > mddev_resume(mddev); > } > @@ -216,6 +217,7 @@ static void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, > mempool_destroy(mddev->serial_info_pool); > mddev->serial_info_pool = NULL; > } > + mddev->serialize_policy = false; > mddev_resume(mddev); > } > } > @@ -5261,6 +5263,49 @@ static struct md_sysfs_entry md_fail_last_dev = > __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, > fail_last_dev_store); > > +static ssize_t serialize_policy_show(struct mddev *mddev, char *page) > +{ > + return sprintf(page, "%d\n", mddev->serialize_policy); Maybe show something like "n/a" for non-raid1? > +} > + > +/* > + * Setting serialize_policy to true to enforce write IO is not reordered > + * for raid1. > + */ > +static ssize_t > +serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) > +{ > + int err; > + bool value; > + > + err = kstrtobool(buf, &value); > + if (err || value == mddev->serialize_policy) > + return err ?: -EINVAL; I think we should not return -EINVAL for value == serialize_policy case. 
> + > + err = mddev_lock(mddev); > + if (err) > + return err; > + if (mddev->pers == NULL || > + (strncmp(mddev->pers->name, "raid1", 4) != 0)) { Check pers->level == 1 instead? strncmp with a length of 4 only compares the "raid" prefix, so e.g. raid0/raid10 would match as well. Also, we need to recheck value == mddev->serialize_policy here, because the earlier comparison was done before mddev_lock() was taken. > + pr_err("md: serialize_policy is only effective for raid1\n"); > + err = -EINVAL; > + goto unlock; > + } > + > + if (value) > + mddev_create_serial_pool(mddev, NULL, false, true); > + else > + mddev_destroy_serial_pool(mddev, NULL, true); > +unlock: > + mddev_unlock(mddev); > + return err ?: len; > +} > + > +static struct md_sysfs_entry md_serialize_policy = > +__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, > + serialize_policy_store); > + > + > static struct attribute *md_default_attrs[] = { > &md_level.attr, > &md_layout.attr, > @@ -5278,6 +5323,7 @@ static struct attribute *md_default_attrs[] = { > &max_corr_read_errors.attr, > &md_consistency_policy.attr, > &md_fail_last_dev.attr, > + &md_serialize_policy.attr, > NULL, > }; > > diff --git a/drivers/md/md.h b/drivers/md/md.h > index 291a59a94528..161772066dab 100644 > --- a/drivers/md/md.h > +++ b/drivers/md/md.h > @@ -494,6 +494,7 @@ struct mddev { > > bool has_superblocks:1; > bool fail_last_dev:1; > + bool serialize_policy:1; > }; > > enum recovery_flags { > -- > 2.17.1 >