On Wed, Aug 18, 2021 at 12:56:41PM +0200, Peter Zijlstra wrote:
> On Wed, Aug 18, 2021 at 11:42:28AM +0100, Will Deacon wrote:
> > I think the idea looks good, but perhaps we could wrap things up a bit:
> >
> > /* Comment about why this is useful with RT */
> > static cpumask_t *clear_user_cpus_ptr(struct task_struct *p)
> > {
> > 	struct cpumask *user_mask = NULL;
> >
> > 	swap(user_mask, p->user_cpus_ptr);
> > 	return user_mask;
> > }
> >
> > void release_user_cpus_ptr(struct task_struct *p)
> > {
> > 	kfree(clear_user_cpus_ptr(p));
> > }
> >
> > Then just use clear_user_cpus_ptr() in sched/core.c where we know what
> > we're doing (well, at least one of us does!).
>
> OK, I'll go make it like that.

Something like so then?

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2497,10 +2497,18 @@ int dup_user_cpus_ptr(struct task_struct
 	return 0;
 }
 
+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
+{
+	struct cpumask *user_mask = NULL;
+
+	swap(p->user_cpus_ptr, user_mask);
+
+	return user_mask;
+}
+
 void release_user_cpus_ptr(struct task_struct *p)
 {
-	kfree(p->user_cpus_ptr);
-	p->user_cpus_ptr = NULL;
+	kfree(clear_user_cpus_ptr(p));
 }
 
 /*
@@ -2733,6 +2741,7 @@ static int __set_cpus_allowed_ptr_locked
 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool kthread = p->flags & PF_KTHREAD;
+	struct cpumask *user_mask = NULL;
 	unsigned int dest_cpu;
 	int ret = 0;
 
@@ -2792,9 +2801,13 @@ static int __set_cpus_allowed_ptr_locked
 	__do_set_cpus_allowed(p, new_mask, flags);
 
 	if (flags & SCA_USER)
-		release_user_cpus_ptr(p);
+		user_mask = clear_user_cpus_ptr(p);
 
-	return affine_move_task(rq, p, rf, dest_cpu, flags);
+	ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+
+	kfree(user_mask);
+
+	return ret;
 
 out:
 	task_rq_unlock(rq, p, rf);
@@ -2941,20 +2954,22 @@ __sched_setaffinity(struct task_struct *
  */
 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 {
+	struct cpumask *user_mask = p->user_cpus_ptr;
 	unsigned long flags;
-	struct cpumask *mask = p->user_cpus_ptr;
 
 	/*
 	 * Try to restore the old affinity mask. If this fails, then
 	 * we free the mask explicitly to avoid it being inherited across
 	 * a subsequent fork().
 	 */
-	if (!mask || !__sched_setaffinity(p, mask))
+	if (!user_mask || !__sched_setaffinity(p, user_mask))
 		return;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	release_user_cpus_ptr(p);
+	user_mask = clear_user_cpus_ptr(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+	kfree(user_mask);
}

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)