The following commit has been merged into the timers/core branch of tip:

Commit-ID:     5cac427f7971b0619ebbfc131ef81fcf229c3c01
Gitweb:        https://git.kernel.org/tip/5cac427f7971b0619ebbfc131ef81fcf229c3c01
Author:        Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate:    Tue, 05 Nov 2024 09:14:38 +01:00
Committer:     Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitterDate: Thu, 07 Nov 2024 02:14:44 +01:00

signal: Split up __sigqueue_alloc()

To cure the SIG_IGN handling for posix interval timers, the preallocated
sigqueue needs to be embedded into struct k_itimer to prevent lifetime
races of all sorts.

Reorganize __sigqueue_alloc() so the ucounts retrieval and the
initialization can be used independently.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/all/20241105064213.371410037@xxxxxxxxxxxxx

---
 kernel/signal.c | 52 +++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 17 deletions(-)

diff --git a/kernel/signal.c b/kernel/signal.c
index ba7159b..dbd4247 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -396,16 +396,9 @@ void task_join_group_stop(struct task_struct *task)
 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
-/*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- *   appropriate lock must be held to stop the target task from exiting
- */
-static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
-		 int override_rlimit, const unsigned int sigqueue_flags)
+static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
+				       int override_rlimit)
 {
-	struct sigqueue *q = NULL;
 	struct ucounts *ucounts;
 	long sigpending;
 
@@ -424,19 +417,44 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	if (!sigpending)
 		return NULL;
 
-	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
-	} else {
+	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
+		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 		print_dropped_signal(sig);
+		return NULL;
 	}
 
-	if (unlikely(q == NULL)) {
+	return ucounts;
+}
+
+static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
+			    const unsigned int sigqueue_flags)
+{
+	INIT_LIST_HEAD(&q->list);
+	q->flags = sigqueue_flags;
+	q->ucounts = ucounts;
+}
+
+/*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ *   appropriate lock must be held to stop the target task from exiting
+ */
+static struct sigqueue *__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
+					 int override_rlimit, const unsigned int sigqueue_flags)
+{
+	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
+	struct sigqueue *q;
+
+	if (!ucounts)
+		return NULL;
+
+	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
+	if (!q) {
 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
-	} else {
-		INIT_LIST_HEAD(&q->list);
-		q->flags = sigqueue_flags;
-		q->ucounts = ucounts;
+		return NULL;
 	}
+
+	__sigqueue_init(q, ucounts, sigqueue_flags);
 	return q;
 }
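
For context, the point of the split is that a later change can set up a
sigqueue that is embedded in another structure without going through
kmem_cache_alloc(). A minimal sketch of such a user, assuming struct
k_itimer gains an embedded 'struct sigqueue sigq' member and that the
helper lives in kernel/signal.c (the function name below is hypothetical
and not part of this patch):

/*
 * Illustration only, not part of this patch: initialize a sigqueue
 * embedded in struct k_itimer by reusing the two helpers split out
 * above instead of allocating from sigqueue_cachep.
 */
bool posixtimer_prealloc_sigqueue(struct k_itimer *tmr)
{
	/* Same accounting as the preallocation path: sig == -1, no rlimit override */
	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);

	if (!ucounts)
		return false;

	/* Assumes an embedded 'struct sigqueue sigq' member in struct k_itimer */
	__sigqueue_init(&tmr->sigq, ucounts, SIGQUEUE_PREALLOC);
	return true;
}

Because sig_get_ucounts() and __sigqueue_init() stay static in
kernel/signal.c, such a caller would have to live there as well or be
reached through a wrapper; the sketch only illustrates that the ucounts
charge and the queue initialization are now usable independently.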