[PATCH] revert "Optimize sys_times for a single thread process"

This patch reverts the 'CONFIG_SMP && thread_group_empty()' optimization in
sys_times().  The reason is that the next patch breaks the memory ordering
that this optimization relies on.

The tasklist_lock in sys_times() will be eliminated completely by a further patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Authored by Oleg Nesterov on 2006-03-28 16:11:19 -08:00; committed by Linus Torvalds
parent 6a14c5c9da
commit 35f5cad8c4
2 changed files with 26 additions and 64 deletions
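
Background for the ordering argument in the commit message (this sketch is not
part of the commit): the reverted fastpath relied on release_task() folding a
dying thread's counters into the shared signal struct in __exit_signal() before
__unhash_process() takes the task off the thread list. A rough user-space
analogue of that "publish counters first, unhash second" ordering, using
pthreads and made-up names (group_utime, still_hashed, exit_path) rather than
kernel primitives, might look like this:

/*
 * Illustrative sketch only; not code from this commit.  The exiting side
 * folds its counters into the shared totals under a lock (the analogue of
 * __exit_signal() taking siglock) and only afterwards marks itself gone
 * (the analogue of __unhash_process()).  A lockless reader that observes
 * "gone" is then guaranteed to also see the folded totals.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long group_utime;        /* analogue of signal->utime          */
static unsigned long exiting_utime = 7;  /* analogue of the dying task's utime */
static int still_hashed = 1;             /* analogue of "still on the tasklist" */

static void *exit_path(void *unused)
{
        (void)unused;
        /* __exit_signal(): accumulate the counters under the lock ... */
        pthread_mutex_lock(&siglock);
        group_utime += exiting_utime;
        pthread_mutex_unlock(&siglock);
        /* ... and only then __unhash_process(): drop off the list. */
        __atomic_store_n(&still_hashed, 0, __ATOMIC_RELEASE);
        return NULL;
}

/* Analogue of the removed sys_times() fastpath: read the totals with no locks. */
static unsigned long fastpath_times(void)
{
        if (__atomic_load_n(&still_hashed, __ATOMIC_ACQUIRE))
                return 0;   /* group not empty yet: the real code falls back to tasklist_lock */
        return group_utime; /* safe only because the counter update happened first */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, exit_path, NULL);
        pthread_join(t, NULL);
        printf("group utime seen locklessly: %lu\n", fastpath_times());
        return 0;
}

Once a later patch changes __exit_signal() so that this guarantee no longer
holds, the lockless read becomes unsafe, which is why the fastpath is reverted
here rather than patched up.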

kernel/exit.c

@@ -139,11 +139,7 @@ repeat:
         ptrace_unlink(p);
         BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
         __exit_signal(p);
-        /*
-         * Note that the fastpath in sys_times depends on __exit_signal having
-         * updated the counters before a task is removed from the tasklist of
-         * the process by __unhash_process.
-         */
         __unhash_process(p);
 
         /*

kernel/sys.c

@@ -1202,43 +1202,9 @@ asmlinkage long sys_times(struct tms __user * tbuf)
          */
         if (tbuf) {
                 struct tms tmp;
-                cputime_t utime, stime, cutime, cstime;
-
-#ifdef CONFIG_SMP
-                if (thread_group_empty(current)) {
-                        /*
-                         * Single thread case without the use of any locks.
-                         *
-                         * We may race with release_task if two threads are
-                         * executing. However, release task first adds up the
-                         * counters (__exit_signal) before removing the task
-                         * from the process tasklist (__unhash_process).
-                         * __exit_signal also acquires and releases the
-                         * siglock which results in the proper memory ordering
-                         * so that the list modifications are always visible
-                         * after the counters have been updated.
-                         *
-                         * If the counters have been updated by the second thread
-                         * but the thread has not yet been removed from the list
-                         * then the other branch will be executing which will
-                         * block on tasklist_lock until the exit handling of the
-                         * other task is finished.
-                         *
-                         * This also implies that the sighand->siglock cannot
-                         * be held by another processor. So we can also
-                         * skip acquiring that lock.
-                         */
-                        utime = cputime_add(current->signal->utime, current->utime);
-                        stime = cputime_add(current->signal->utime, current->stime);
-                        cutime = current->signal->cutime;
-                        cstime = current->signal->cstime;
-                } else
-#endif
-                {
-
-                /* Process with multiple threads */
                 struct task_struct *tsk = current;
                 struct task_struct *t;
+                cputime_t utime, stime, cutime, cstime;
 
                 read_lock(&tasklist_lock);
                 utime = tsk->signal->utime;
@@ -1264,7 +1230,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
                 cstime = tsk->signal->cstime;
                 spin_unlock_irq(&tsk->sighand->siglock);
                 read_unlock(&tasklist_lock);
-                }
+
                 tmp.tms_utime = cputime_to_clock_t(utime);
                 tmp.tms_stime = cputime_to_clock_t(stime);
                 tmp.tms_cutime = cputime_to_clock_t(cutime);
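
For reference (again, not part of the patch): the user-visible contract that
sys_times() implements is the times(2) system call filling in struct tms. A
minimal, self-contained caller that exercises the code path touched above:

/* Minimal user-space caller of times(2), the syscall backed by sys_times(). */
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        struct tms t;
        clock_t elapsed = times(&t);              /* kernel fills the four tms_* fields */
        long ticks_per_sec = sysconf(_SC_CLK_TCK); /* clock ticks per second */

        if (elapsed == (clock_t)-1)
                return 1;
        printf("utime=%ld stime=%ld cutime=%ld cstime=%ld (in 1/%ld s ticks)\n",
               (long)t.tms_utime, (long)t.tms_stime,
               (long)t.tms_cutime, (long)t.tms_cstime, ticks_per_sec);
        return 0;
}

The four fields printed here are the values the hunk above converts with
cputime_to_clock_t() before returning them to user space.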