membarrier reads cpu_rq(remote cpu)->curr->mm without locking.  Use
READ_ONCE() and WRITE_ONCE() to remove the data races.

Cc: Mathieu Desnoyers <mathieu.desnoyers@xxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Acked-by: Nicholas Piggin <npiggin@xxxxxxxxx>
Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
---
 fs/exec.c                 | 2 +-
 kernel/exit.c             | 2 +-
 kernel/kthread.c          | 4 ++--
 kernel/sched/membarrier.c | 7 ++++---
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 3abbd0294e73..38b05e01c5bd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1006,7 +1006,7 @@ static int exec_mmap(struct mm_struct *mm)
 	local_irq_disable();
 	active_mm = tsk->active_mm;
 	tsk->active_mm = mm;
-	tsk->mm = mm;
+	WRITE_ONCE(tsk->mm, mm);	/* membarrier reads this without locks */
 	/*
 	 * This prevents preemption while active_mm is being loaded and
 	 * it and mm are being updated, which could cause problems for
diff --git a/kernel/exit.c b/kernel/exit.c
index 91a43e57a32e..70f2cbc42015 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -491,7 +491,7 @@ static void exit_mm(void)
 	 */
 	smp_mb__after_spinlock();
 	local_irq_disable();
-	current->mm = NULL;
+	WRITE_ONCE(current->mm, NULL);
 	membarrier_update_current_mm(NULL);
 	enter_lazy_tlb(mm, current);
 	local_irq_enable();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 396ae78a1a34..3b18329f885c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1358,7 +1358,7 @@ void kthread_use_mm(struct mm_struct *mm)
 		mmgrab(mm);
 		tsk->active_mm = mm;
 	}
-	tsk->mm = mm;
+	WRITE_ONCE(tsk->mm, mm);	/* membarrier reads this without locks */
 	membarrier_update_current_mm(mm);
 	switch_mm_irqs_off(active_mm, mm, tsk);
 	membarrier_finish_switch_mm(mm);
@@ -1399,7 +1399,7 @@ void kthread_unuse_mm(struct mm_struct *mm)
 	smp_mb__after_spinlock();
 	sync_mm_rss(mm);
 	local_irq_disable();
-	tsk->mm = NULL;
+	WRITE_ONCE(tsk->mm, NULL);	/* membarrier reads this without locks */
 	membarrier_update_current_mm(NULL);
 	/* active_mm is still 'mm' */
 	enter_lazy_tlb(mm, tsk);
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 30e964b9689d..327830f89c37 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -411,7 +411,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
 			goto out;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu_id)->curr);
-		if (!p || p->mm != mm) {
+		if (!p || READ_ONCE(p->mm) != mm) {
 			rcu_read_unlock();
 			goto out;
 		}
@@ -424,7 +424,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
 			struct task_struct *p;
 
 			p = rcu_dereference(cpu_rq(cpu)->curr);
-			if (p && p->mm == mm)
+			if (p && READ_ONCE(p->mm) == mm)
 				__cpumask_set_cpu(cpu, tmpmask);
 		}
 		rcu_read_unlock();
@@ -522,7 +522,8 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct task_struct *p;
 
 		p = rcu_dereference(rq->curr);
-		if (p && p->mm == mm)
+		/* exec and kthread_use_mm() write ->mm without locks */
+		if (p && READ_ONCE(p->mm) == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-- 
2.33.1
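
For readers unfamiliar with the marked-access pattern, below is a minimal
userspace sketch (not kernel code) of what the change is about: one thread
updates a task's ->mm pointer while another thread reads it locklessly,
mirroring the exec/kthread_use_mm() write side and the membarrier scan on
the read side. The struct names, the volatile-cast macro stand-ins, and the
pthread harness are illustrative assumptions, not the kernel implementation
(the real READ_ONCE()/WRITE_ONCE() live in <linux/compiler.h> and handle
more cases).

/* Build with: cc -O2 -pthread sketch.c */
#include <stdio.h>
#include <pthread.h>

/*
 * Simplified stand-ins for the kernel macros: the volatile cast forces the
 * compiler to emit exactly one, untorn access instead of refetching,
 * caching, or splitting the load/store.
 */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct mm_struct { int id; };
struct task { struct mm_struct *mm; };

static struct task tsk;
static struct mm_struct demo_mm = { .id = 1 };

/* Writer side: like exec_mmap()/kthread_use_mm() installing a new ->mm. */
static void *writer(void *arg)
{
	(void)arg;
	WRITE_ONCE(tsk.mm, &demo_mm);	/* a plain "tsk.mm = &demo_mm" would be a data race */
	return NULL;
}

/* Reader side: like membarrier scanning cpu_rq(cpu)->curr->mm without locks. */
static void *reader(void *arg)
{
	struct mm_struct *mm;

	(void)arg;
	mm = READ_ONCE(tsk.mm);		/* read the pointer exactly once */
	if (mm)
		printf("observed mm %d\n", mm->id);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

The sketch only illustrates why both sides of the race need to be marked:
READ_ONCE()/WRITE_ONCE() do not add ordering or locking, they just make the
unsynchronized pointer accesses well defined for the compiler, which is all
the membarrier scan needs here.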