coredump: elf_core_dump: use core_state->dumper list

Kill the nasty rcu_read_lock() + do_each_thread() loop, use the list
encoded in mm->core_state instead, s/GFP_ATOMIC/GFP_KERNEL/.

This patch allows further cleanups in binfmt_elf.c; in particular, we can
kill the parallel info->threads list.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Oleg Nesterov 2008-07-25 01:47:45 -07:00 committed by Linus Torvalds
parent b564daf806
commit 83914441f9

View File

@@ -1478,7 +1478,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	const struct user_regset_view *view = task_user_regset_view(dump_task);
 	struct elf_thread_core_info *t;
 	struct elf_prpsinfo *psinfo;
-	struct task_struct *g, *p;
+	struct core_thread *ct;
 	unsigned int i;
 
 	info->size = 0;
@@ -1517,34 +1517,26 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	/*
 	 * Allocate a structure for each thread.
 	 */
-	rcu_read_lock();
-	do_each_thread(g, p)
-		if (p->mm == dump_task->mm) {
-			if (p->flags & PF_KTHREAD)
-				continue;
-
-			t = kzalloc(offsetof(struct elf_thread_core_info,
-					     notes[info->thread_notes]),
-				    GFP_ATOMIC);
-			if (unlikely(!t)) {
-				rcu_read_unlock();
-				return 0;
-			}
-			t->task = p;
-			if (p == dump_task || !info->thread) {
-				t->next = info->thread;
-				info->thread = t;
-			} else {
-				/*
-				 * Make sure to keep the original task at
-				 * the head of the list.
-				 */
-				t->next = info->thread->next;
-				info->thread->next = t;
-			}
-		}
-	while_each_thread(g, p);
-	rcu_read_unlock();
+	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+		t = kzalloc(offsetof(struct elf_thread_core_info,
+				     notes[info->thread_notes]),
+			    GFP_KERNEL);
+		if (unlikely(!t))
+			return 0;
+
+		t->task = ct->task;
+		if (ct->task == dump_task || !info->thread) {
+			t->next = info->thread;
+			info->thread = t;
+		} else {
+			/*
+			 * Make sure to keep the original task at
+			 * the head of the list.
+			 */
+			t->next = info->thread->next;
+			info->thread->next = t;
+		}
+	}
 
 	/*
 	 * Now fill in each thread's information.
@@ -1691,7 +1683,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 {
 #define NUM_NOTES	6
 	struct list_head *t;
-	struct task_struct *g, *p;
 
 	info->notes = NULL;
 	info->prstatus = NULL;
@@ -1723,23 +1714,19 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 
 	info->thread_status_size = 0;
 	if (signr) {
+		struct core_thread *ct;
 		struct elf_thread_status *ets;
-		rcu_read_lock();
-		do_each_thread(g, p)
-			if (current->mm == p->mm && current != p) {
-				if (p->flags & PF_KTHREAD)
-					continue;
-
-				ets = kzalloc(sizeof(*ets), GFP_ATOMIC);
-				if (!ets) {
-					rcu_read_unlock();
-					return 0;
-				}
-				ets->thread = p;
-				list_add(&ets->list, &info->thread_list);
-			}
-		while_each_thread(g, p);
-		rcu_read_unlock();
+
+		for (ct = current->mm->core_state->dumper.next;
+				ct; ct = ct->next) {
+			ets = kzalloc(sizeof(*ets), GFP_KERNEL);
+			if (!ets)
+				return 0;
+
+			ets->thread = ct->task;
+			list_add(&ets->list, &info->thread_list);
+		}
+
 		list_for_each(t, &info->thread_list) {
 			int sz;