The refcount_t type and corresponding API should be used instead of atomic_t
when the variable is used as a reference counter. This avoids accidental
refcounter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@xxxxxxxxx>
Signed-off-by: Hans Liljestrand <ishkamiel@xxxxxxxxx>
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
Signed-off-by: David Windsor <dwindsor@xxxxxxxxx>
---
 include/linux/sched/signal.h | 2 +-
 kernel/fork.c                | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 4d5cdf1..c5f1a67 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -77,7 +77,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
-	atomic_t		sigcnt;
+	refcount_t		sigcnt;
 	atomic_t		live;
 	int			nr_threads;
 	struct list_head	thread_head;
diff --git a/kernel/fork.c b/kernel/fork.c
index 4e28f50..a9763f6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -399,7 +399,7 @@ static inline void free_signal_struct(struct signal_struct *sig)
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-	if (atomic_dec_and_test(&sig->sigcnt))
+	if (refcount_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
 }
 
@@ -1379,7 +1379,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
 	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
-	atomic_set(&sig->sigcnt, 1);
+	refcount_set(&sig->sigcnt, 1);
 
 	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
 	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -1888,7 +1888,7 @@ static __latent_entropy struct task_struct *copy_process(
 	} else {
 		current->signal->nr_threads++;
 		atomic_inc(&current->signal->live);
-		atomic_inc(&current->signal->sigcnt);
+		refcount_inc(&current->signal->sigcnt);
 		list_add_tail_rcu(&p->thread_group,
 				  &p->group_leader->thread_group);
 		list_add_tail_rcu(&p->thread_node,
-- 
2.7.4
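
For readers unfamiliar with the refcount_t API, a minimal sketch of the
get/put pattern this conversion follows is shown below. It is illustrative
only and not part of the patch; the "foo" struct and helper names are made
up, and only refcount_set()/refcount_inc()/refcount_dec_and_test() from
<linux/refcount.h> are assumed.

	/*
	 * Illustrative only: generic refcount_t get/put pattern.
	 * Struct and function names here are hypothetical.
	 */
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t	refs;
		/* ... payload ... */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			refcount_set(&f->refs, 1);	/* initial reference */
		return f;
	}

	static void foo_get(struct foo *f)
	{
		/* saturates and WARNs on overflow instead of wrapping */
		refcount_inc(&f->refs);
	}

	static void foo_put(struct foo *f)
	{
		/* free only when the last reference is dropped */
		if (refcount_dec_and_test(&f->refs))
			kfree(f);
	}

Unlike atomic_t, the refcount_t operations saturate rather than wrap on
overflow, which is what prevents the overflow-driven use-after-free
scenario described in the commit message.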