On Fri, Feb 08, 2019 at 10:05:42AM +0000, Patrick Bellasi wrote:
> +int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
> +				  void __user *buffer, size_t *lenp,
> +				  loff_t *ppos)
> +{
> +	int old_min, old_max;
> +	int result = 0;

Should this not have an internal mutex to serialize concurrent usage?
See for example sched_rt_handler(). (A rough sketch of what I mean is at
the end of this mail.)

> +
> +	old_min = sysctl_sched_uclamp_util_min;
> +	old_max = sysctl_sched_uclamp_util_max;
> +
> +	result = proc_dointvec(table, write, buffer, lenp, ppos);
> +	if (result)
> +		goto undo;
> +	if (!write)
> +		goto done;
> +
> +	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
> +	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
> +		result = -EINVAL;
> +		goto undo;
> +	}
> +
> +	if (old_min != sysctl_sched_uclamp_util_min) {
> +		uclamp_default[UCLAMP_MIN].value =
> +			sysctl_sched_uclamp_util_min;
> +		uclamp_default[UCLAMP_MIN].bucket_id =
> +			uclamp_bucket_id(sysctl_sched_uclamp_util_min);
> +	}
> +	if (old_max != sysctl_sched_uclamp_util_max) {
> +		uclamp_default[UCLAMP_MAX].value =
> +			sysctl_sched_uclamp_util_max;
> +		uclamp_default[UCLAMP_MAX].bucket_id =
> +			uclamp_bucket_id(sysctl_sched_uclamp_util_max);
> +	}
> +
> +	/*
> +	 * Updating all the RUNNABLE task is expensive, keep it simple and do
> +	 * just a lazy update at each next enqueue time.
> +	 */
> +	goto done;
> +
> +undo:
> +	sysctl_sched_uclamp_util_min = old_min;
> +	sysctl_sched_uclamp_util_max = old_max;
> +done:
> +
> +	return result;
> +}
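
For clarity, a rough sketch of the serialization I have in mind, following
the static-mutex pattern sched_rt_handler() uses; the mutex name and the
trimmed body are illustrative only, not part of the patch:

/* Name is illustrative; serializes concurrent sysctl writers. */
static DEFINE_MUTEX(uclamp_mutex);

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int old_min, old_max;
	int result;

	/* Serialize writers, like sched_rt_handler() does. */
	mutex_lock(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
		result = -EINVAL;
		goto undo;
	}

	/* ... update uclamp_default[] exactly as in the patch above ... */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
done:
	mutex_unlock(&uclamp_mutex);
	return result;
}

Without something like that, two concurrent writers can interleave and the
undo path may restore old_min/old_max values sampled in the middle of the
other writer's update.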