[PATCH 7/9] signal: Make individual tasks exiting a first class concept.

Implement start_task_exit_locked and rewrite the de_thread logic
in exec using it.

Calling start_task_exit_locked is equivalent to asynchronously
calling exit(2), aka pthread_exit, on a task.
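
As a rough usage sketch (not part of this patch): a hypothetical caller
that wants one specific thread to exit would take the shared siglock and
invoke the new helper, e.g.

	/* Illustration only; ask_thread_to_exit() is not added by this series. */
	static void ask_thread_to_exit(struct task_struct *t, int exit_code)
	{
		unsigned long flags;

		if (lock_task_sighand(t, &flags)) {
			/* Sets JOBCTL_TASK_EXITING and queues SIGKILL for @t */
			start_task_exit_locked(t, exit_code);
			unlock_task_sighand(t, &flags);
		}
	}

de_thread() below does the same thing directly, under the siglock it
already holds, for every thread other than the exec'ing one.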

Signed-off-by: "Eric W. Biederman" <ebiederm@xxxxxxxxxxxx>
---
 fs/exec.c                    | 10 +++++++++-
 include/linux/sched/jobctl.h |  2 ++
 include/linux/sched/signal.h |  1 +
 kernel/signal.c              | 37 ++++++++++++++++--------------------
 4 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 18594f11c31f..b6f50213f0a0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1040,6 +1040,7 @@ static int de_thread(struct task_struct *tsk)
 	struct signal_struct *sig = tsk->signal;
 	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
+	struct task_struct *t;
 
 	if (thread_group_empty(tsk))
 		goto no_thread_group;
@@ -1058,7 +1059,14 @@ static int de_thread(struct task_struct *tsk)
 	}
 
 	sig->group_exit_task = tsk;
-	sig->notify_count = zap_other_threads(tsk);
+	sig->group_stop_count = 0;
+	sig->notify_count = 0;
+	__for_each_thread(sig, t) {
+		if (t == tsk)
+			continue;
+		sig->notify_count++;
+		start_task_exit_locked(t, SIGKILL);
+	}
 	if (!thread_group_leader(tsk))
 		sig->notify_count--;
 
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..e94833b0c819 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT	23	/* trap for cgroup freezer */
+#define JOBCTL_TASK_EXITING_BIT 31	/* the task is exiting */
 
 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +29,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE	(1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_TASK_EXITING	(1UL << JOBCTL_TASK_EXITING_BIT)
 
 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c007e55cb119..a958381ba4a9 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -429,6 +429,7 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 }
 
 void start_group_exit(int exit_code);
+void start_task_exit_locked(struct task_struct *task, int exit_code);
 
 void task_join_group_stop(struct task_struct *task);
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 95a076af600a..afbc001220dd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -264,6 +264,12 @@ static inline void print_dropped_signal(int sig)
 				current->comm, current->pid, sig);
 }
 
+static void task_set_jobctl_exiting(struct task_struct *task, int exit_code)
+{
+	WARN_ON_ONCE(task->jobctl & ~JOBCTL_STOP_SIGMASK);
+	task->jobctl = JOBCTL_TASK_EXITING | (exit_code & JOBCTL_STOP_SIGMASK);
+}
+
 /**
  * task_set_jobctl_pending - set jobctl pending bits
  * @task: target task
@@ -1407,28 +1413,15 @@ int force_sig_info(struct kernel_siginfo *info)
 	return force_sig_info_to_task(info, current, false);
 }
 
-/*
- * Nuke all other threads in the group.
- */
-int zap_other_threads(struct task_struct *p)
+void start_task_exit_locked(struct task_struct *task, int exit_code)
 {
-	struct task_struct *t = p;
-	int count = 0;
-
-	p->signal->group_stop_count = 0;
-
-	while_each_thread(p, t) {
-		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-		count++;
-
-		/* Don't bother with already dead threads */
-		if (t->exit_state)
-			continue;
-		sigaddset(&t->pending.signal, SIGKILL);
-		signal_wake_up(t, 1);
+	task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
+	/* Only bother with threads that might be alive */
+	if (!task->exit_state) {
+		task_set_jobctl_exiting(task, exit_code);
+		sigaddset(&task->pending.signal, SIGKILL);
+		signal_wake_up(task, 1);
 	}
-
-	return count;
 }
 
 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
@@ -2714,7 +2707,7 @@ bool get_signal(struct ksignal *ksig)
 	}
 
 	/* Has this task already been marked for death? */
-	if (signal_group_exit(signal)) {
+	if (signal_group_exit(signal) || (current->jobctl & JOBCTL_TASK_EXITING)) {
 		ksig->info.si_signo = signr = SIGKILL;
 		sigdelset(&current->pending.signal, SIGKILL);
 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
@@ -2874,6 +2867,8 @@ bool get_signal(struct ksignal *ksig)
 			if (signal_group_exit(signal)) {
 				/* Another thread got here before we took the lock.  */
 				exit_code = signal->group_exit_code;
+			} else if (current->jobctl & JOBCTL_TASK_EXITING) {
+				exit_code = current->jobctl & JOBCTL_STOP_SIGMASK;
 			} else {
 				start_group_exit_locked(signal, exit_code);
 			}
-- 
2.20.1



