Since accessing the content of user_cpus_ptr requires lock protection to
ensure its validity, provide a helper function copy_user_cpus_mask() to
facilitate reading it.

Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
 include/linux/sched.h |  1 +
 kernel/sched/core.c   | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index cf7206a9b29a..e06bc1cbccca 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1831,6 +1831,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effec
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern struct cpumask *copy_user_cpus_mask(struct task_struct *p, struct cpumask *user_mask);
 extern void release_user_cpus_ptr(struct task_struct *p);
 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7e2576068331..4d3b10e91e1a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2619,6 +2619,24 @@ void release_user_cpus_ptr(struct task_struct *p)
 	kfree(clear_user_cpus_ptr(p));
 }
 
+/*
+ * Return the copied mask pointer or NULL if user mask not available.
+ */
+struct cpumask *copy_user_cpus_mask(struct task_struct *p,
+				    struct cpumask *user_mask)
+{
+	struct rq_flags rf;
+	struct rq *rq = task_rq_lock(p, &rf);
+	struct cpumask *mask = NULL;
+
+	if (p->user_cpus_ptr) {
+		cpumask_copy(user_mask, p->user_cpus_ptr);
+		mask = user_mask;
+	}
+	task_rq_unlock(rq, p, &rf);
+	return mask;
+}
+
 /*
  * This function is wildly self concurrent; here be dragons.
  *
--
2.31.1
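
A minimal caller sketch, assuming the helper above is in place; the
function name demo_restore_user_mask() and the fallback to
cpu_possible_mask are illustrative assumptions, not code from this
series:

	/* Restore a task's user-requested affinity, if one was recorded. */
	static void demo_restore_user_mask(struct task_struct *p)
	{
		cpumask_var_t new_mask;

		if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
			return;

		/*
		 * copy_user_cpus_mask() takes task_rq_lock() internally, so
		 * the caller only supplies storage for the copy; a NULL
		 * return means no user mask was set.
		 */
		if (!copy_user_cpus_mask(p, new_mask))
			cpumask_copy(new_mask, cpu_possible_mask);

		set_cpus_allowed_ptr(p, new_mask);
		free_cpumask_var(new_mask);
	}

The design keeps the rq lock hold time short: only the cpumask_copy()
inside the helper runs under task_rq_lock(), and the caller then works
on its own stable copy without further locking.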