signal: Guarantee that SIGNAL_GROUP_EXIT is set on process exit
Track how many threads have not started exiting and when the last thread starts exiting set SIGNAL_GROUP_EXIT. This guarantees that SIGNAL_GROUP_EXIT will get set when a process exits. In practice this achieves nothing as glibc's implementation of _exit calls sys_group_exit then sys_exit. While glibc's implementation of pthread_exit calls exit (which cleans up and calls _exit) if it is the last thread, and sys_exit if it is not the last thread. This means the only way the kernel might observe a process that does not call exit_group is if the language runtime does not use glibc. With more cleanups I hope to move the decrement of quick_threads earlier. Link: https://lkml.kernel.org/r/87bkukd4tc.fsf_-_@email.froward.int.ebiederm.org Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
This commit is contained in:
@@ -94,6 +94,7 @@ struct signal_struct {
|
|||||||
refcount_t sigcnt;
|
refcount_t sigcnt;
|
||||||
atomic_t live;
|
atomic_t live;
|
||||||
int nr_threads;
|
int nr_threads;
|
||||||
|
int quick_threads;
|
||||||
struct list_head thread_head;
|
struct list_head thread_head;
|
||||||
|
|
||||||
wait_queue_head_t wait_chldexit; /* for wait4() */
|
wait_queue_head_t wait_chldexit; /* for wait4() */
|
||||||
|
|||||||
@@ -733,11 +733,29 @@ static void check_stack_usage(void)
|
|||||||
static inline void check_stack_usage(void) {}
|
static inline void check_stack_usage(void) {}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static void synchronize_group_exit(struct task_struct *tsk, long code)
|
||||||
|
{
|
||||||
|
struct sighand_struct *sighand = tsk->sighand;
|
||||||
|
struct signal_struct *signal = tsk->signal;
|
||||||
|
|
||||||
|
spin_lock_irq(&sighand->siglock);
|
||||||
|
signal->quick_threads--;
|
||||||
|
if ((signal->quick_threads == 0) &&
|
||||||
|
!(signal->flags & SIGNAL_GROUP_EXIT)) {
|
||||||
|
signal->flags = SIGNAL_GROUP_EXIT;
|
||||||
|
signal->group_exit_code = code;
|
||||||
|
signal->group_stop_count = 0;
|
||||||
|
}
|
||||||
|
spin_unlock_irq(&sighand->siglock);
|
||||||
|
}
|
||||||
|
|
||||||
void __noreturn do_exit(long code)
|
void __noreturn do_exit(long code)
|
||||||
{
|
{
|
||||||
struct task_struct *tsk = current;
|
struct task_struct *tsk = current;
|
||||||
int group_dead;
|
int group_dead;
|
||||||
|
|
||||||
|
synchronize_group_exit(tsk, code);
|
||||||
|
|
||||||
WARN_ON(tsk->plug);
|
WARN_ON(tsk->plug);
|
||||||
|
|
||||||
kcov_task_exit(tsk);
|
kcov_task_exit(tsk);
|
||||||
|
|||||||
@@ -1692,6 +1692,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
sig->nr_threads = 1;
|
sig->nr_threads = 1;
|
||||||
|
sig->quick_threads = 1;
|
||||||
atomic_set(&sig->live, 1);
|
atomic_set(&sig->live, 1);
|
||||||
refcount_set(&sig->sigcnt, 1);
|
refcount_set(&sig->sigcnt, 1);
|
||||||
|
|
||||||
@@ -2444,6 +2445,7 @@ static __latent_entropy struct task_struct *copy_process(
|
|||||||
__this_cpu_inc(process_counts);
|
__this_cpu_inc(process_counts);
|
||||||
} else {
|
} else {
|
||||||
current->signal->nr_threads++;
|
current->signal->nr_threads++;
|
||||||
|
current->signal->quick_threads++;
|
||||||
atomic_inc(&current->signal->live);
|
atomic_inc(&current->signal->live);
|
||||||
refcount_inc(&current->signal->sigcnt);
|
refcount_inc(&current->signal->sigcnt);
|
||||||
task_join_group_stop(p);
|
task_join_group_stop(p);
|
||||||
|
|||||||
Reference in New Issue
Block a user