Merge branch 'linus' into perfcounters/core-v2
Merge reason: we have gathered quite a few conflicts, need to merge upstream

Conflicts:
	arch/powerpc/kernel/Makefile
	arch/x86/ia32/ia32entry.S
	arch/x86/include/asm/hardirq.h
	arch/x86/include/asm/unistd_32.h
	arch/x86/include/asm/unistd_64.h
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/irq.c
	arch/x86/kernel/syscall_table_32.S
	arch/x86/mm/iomap_32.c
	include/linux/sched.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/exit.c | 245 changed lines

--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
+#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <trace/sched.h>
 
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
@@ -365,16 +361,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
-	pid_t nr = pid_nr(pid);
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		set_task_session(curr, nr);
-	}
-	if (task_pgrp(curr) != pid) {
+
+	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
-		set_task_pgrp(curr, nr);
-	}
 }
 
 static void set_special_pids(struct pid *pid)
@@ -432,7 +424,6 @@ EXPORT_SYMBOL(disallow_signal);
 void daemonize(const char *name, ...)
 {
 	va_list args;
-	struct fs_struct *fs;
 	sigset_t blocked;
 
 	va_start(args, name);
@@ -465,11 +456,7 @@ void daemonize(const char *name, ...)
 
 	/* Become as one with the init task */
 
-	exit_fs(current);	/* current->fs->count--; */
-	fs = init_task.fs;
-	current->fs = fs;
-	atomic_inc(&fs->count);
-
+	daemonize_fs_struct();
 	exit_files(current);
 	current->files = init_task.files;
 	atomic_inc(&current->files->count);
@@ -568,30 +555,6 @@ void exit_files(struct task_struct *tsk)
 	}
 }
 
-void put_fs_struct(struct fs_struct *fs)
-{
-	/* No need to hold fs->lock if we are killing it */
-	if (atomic_dec_and_test(&fs->count)) {
-		path_put(&fs->root);
-		path_put(&fs->pwd);
-		kmem_cache_free(fs_cachep, fs);
-	}
-}
-
-void exit_fs(struct task_struct *tsk)
-{
-	struct fs_struct * fs = tsk->fs;
-
-	if (fs) {
-		task_lock(tsk);
-		tsk->fs = NULL;
-		task_unlock(tsk);
-		put_fs_struct(fs);
-	}
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
 #ifdef CONFIG_MM_OWNER
 /*
  * Task p is exiting and it owned mm, lets find a new owner for it
@@ -734,119 +697,6 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-/*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
-	int ret;
-	struct sighand_struct *psig = parent->sighand;
-	unsigned long flags;
-	spin_lock_irqsave(&psig->siglock, flags);
-	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
-	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
-	spin_unlock_irqrestore(&psig->siglock, flags);
-	return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
-	struct task_struct *p, *n;
-	int ign = -1;
-
-	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
-		__ptrace_unlink(p);
-
-		if (p->exit_state != EXIT_ZOMBIE)
-			continue;
-
-		/*
-		 * If it's a zombie, our attachedness prevented normal
-		 * parent notification or self-reaping. Do notification
-		 * now if it would have happened earlier. If it should
-		 * reap itself, add it to the @dead list. We can't call
-		 * release_task() here because we already hold tasklist_lock.
-		 *
-		 * If it's our own child, there is no notification to do.
-		 * But if our normal children self-reap, then this child
-		 * was prevented by ptrace and we must reap it now.
-		 */
-		if (!task_detached(p) && thread_group_empty(p)) {
-			if (!same_thread_group(p->real_parent, parent))
-				do_notify_parent(p, p->exit_signal);
-			else {
-				if (ign < 0)
-					ign = ignoring_children(parent);
-				if (ign)
-					p->exit_signal = -1;
-			}
-		}
-
-		if (task_detached(p)) {
-			/*
-			 * Mark it as in the process of being reaped.
-			 */
-			p->exit_state = EXIT_DEAD;
-			list_add(&p->ptrace_entry, dead);
-		}
-	}
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
-			       struct list_head *dead)
-{
-	struct task_struct *p, *n;
-
-	BUG_ON(!list_empty(&parent->ptraced));
-
-	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
-		list_del_init(&p->ptrace_entry);
-		release_task(p);
-	}
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
-	if (p->pdeath_signal)
-		/* We already hold the tasklist_lock here. */
-		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
-	list_move_tail(&p->sibling, &p->real_parent->children);
-
-	/* If this is a threaded reparent there is no need to
-	 * notify anyone anything has happened.
-	 */
-	if (same_thread_group(p->real_parent, father))
-		return;
-
-	/* We don't want people slaying init. */
-	if (!task_detached(p))
-		p->exit_signal = SIGCHLD;
-
-	/* If we'd notified the old parent about this child's death,
-	 * also notify the new parent.
-	 */
-	if (!ptrace_reparented(p) &&
-	    p->exit_state == EXIT_ZOMBIE &&
-	    !task_detached(p) && thread_group_empty(p))
-		do_notify_parent(p, p->exit_signal);
-
-	kill_orphaned_pgrp(p, father);
-}
-
 /*
  * When we die, we re-parent all our children.
  * Try to give them to another thread in our thread
@@ -886,17 +736,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 	return pid_ns->child_reaper;
 }
 
+/*
+ * Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+				struct list_head *dead)
+{
+	if (p->pdeath_signal)
+		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+	list_move_tail(&p->sibling, &p->real_parent->children);
+
+	if (task_detached(p))
+		return;
+	/*
+	 * If this is a threaded reparent there is no need to
+	 * notify anyone anything has happened.
+	 */
+	if (same_thread_group(p->real_parent, father))
+		return;
+
+	/* We don't want people slaying init. */
+	p->exit_signal = SIGCHLD;
+
+	/* If it has exited notify the new parent about this child's death. */
+	if (!p->ptrace &&
+	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+		do_notify_parent(p, p->exit_signal);
+		if (task_detached(p)) {
+			p->exit_state = EXIT_DEAD;
+			list_move_tail(&p->sibling, dead);
+		}
+	}
+
+	kill_orphaned_pgrp(p, father);
+}
+
 static void forget_original_parent(struct task_struct *father)
 {
 	struct task_struct *p, *n, *reaper;
-	LIST_HEAD(ptrace_dead);
+	LIST_HEAD(dead_children);
 
+	exit_ptrace(father);
+
 	write_lock_irq(&tasklist_lock);
 	reaper = find_new_reaper(father);
-	/*
-	 * First clean up ptrace if we were using it.
-	 */
-	ptrace_exit(father, &ptrace_dead);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
 		p->real_parent = reaper;
@@ -904,13 +788,16 @@ static void forget_original_parent(struct task_struct *father)
 			BUG_ON(p->ptrace);
 			p->parent = p->real_parent;
 		}
-		reparent_thread(p, father);
+		reparent_thread(father, p, &dead_children);
 	}
-
 	write_unlock_irq(&tasklist_lock);
+
 	BUG_ON(!list_empty(&father->children));
 
-	ptrace_exit_finish(father, &ptrace_dead);
+	list_for_each_entry_safe(p, n, &dead_children, sibling) {
+		list_del_init(&p->sibling);
+		release_task(p);
+	}
 }
 
 /*
@@ -1422,6 +1309,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	return retval;
 }
 
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+	if (ptrace) {
+		if (task_is_stopped_or_traced(p))
+			return &p->exit_code;
+	} else {
+		if (p->signal->flags & SIGNAL_STOP_STOPPED)
+			return &p->signal->group_exit_code;
+	}
+	return NULL;
+}
+
 /*
  * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
  * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
@@ -1432,7 +1331,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 			       int options, struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
-	int retval, exit_code, why;
+	int retval, exit_code, *p_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
@@ -1442,22 +1341,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 	exit_code = 0;
 	spin_lock_irq(&p->sighand->siglock);
 
-	if (unlikely(!task_is_stopped_or_traced(p)))
+	p_code = task_stopped_code(p, ptrace);
+	if (unlikely(!p_code))
 		goto unlock_sig;
 
-	if (!ptrace && p->signal->group_stop_count > 0)
-		/*
-		 * A group stop is in progress and this is the group leader.
-		 * We won't report until all threads have stopped.
-		 */
-		goto unlock_sig;
-
-	exit_code = p->exit_code;
+	exit_code = *p_code;
 	if (!exit_code)
 		goto unlock_sig;
 
 	if (!unlikely(options & WNOWAIT))
-		p->exit_code = 0;
+		*p_code = 0;
 
 	/* don't need the RCU readlock here as we're holding a spinlock */
 	uid = __task_cred(p)->uid;
@@ -1613,7 +1506,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
 	 */
 	*notask_error = 0;
 
-	if (task_is_stopped_or_traced(p))
+	if (task_stopped_code(p, ptrace))
 		return wait_task_stopped(ptrace, p, options,
 					 infop, stat_addr, ru);
 
@@ -1817,7 +1710,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
 		pid = find_get_pid(-upid);
 	} else if (upid == 0) {
 		type = PIDTYPE_PGID;
-		pid = get_pid(task_pgrp(current));
+		pid = get_task_pid(current, PIDTYPE_PGID);
 	} else /* upid > 0 */ {
 		type = PIDTYPE_PID;
 		pid = find_get_pid(upid);
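
Note on the wait-path change above: task_stopped_code() collapses two diverging checks (task_is_stopped_or_traced() on the ptrace path, SIGNAL_STOP_STOPPED on the group-stop path) into one helper that returns a pointer to whichever status word applies, or NULL when there is nothing to report. Below is a minimal userspace sketch of that pattern, not kernel code; struct toy_task and its fields are invented for illustration, and only the helper's shape mirrors the diff.

#include <stdio.h>
#include <stdbool.h>

struct toy_task {
	bool traced;		/* stands in for task_is_stopped_or_traced() */
	bool group_stopped;	/* stands in for SIGNAL_STOP_STOPPED */
	int exit_code;		/* per-task stop code */
	int group_exit_code;	/* group-wide stop code */
};

/* Return a pointer to the relevant status word, or NULL if not stopped. */
static int *toy_stopped_code(struct toy_task *t, bool ptrace)
{
	if (ptrace) {
		if (t->traced)
			return &t->exit_code;
	} else {
		if (t->group_stopped)
			return &t->group_exit_code;
	}
	return NULL;
}

int main(void)
{
	struct toy_task t = { .group_stopped = true, .group_exit_code = 19 };
	int *p_code = toy_stopped_code(&t, false);

	if (p_code) {
		int exit_code = *p_code;
		*p_code = 0;	/* consume the report, as wait_task_stopped() now does */
		printf("stopped, code %d\n", exit_code);
	}
	return 0;
}

Callers such as wait_task_stopped() and wait_consider_task() then read and clear the status through the returned pointer, so the stopped-versus-traced distinction lives in exactly one place.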