sched: s/do_each_thread/for_each_process_thread/ in core.c

Change kernel/sched/core.c to use for_each_process_thread().
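For reference, the conversion pattern looks like the sketch below (illustration only, not part of the patch; g and p are the struct task_struct cursors used at the converted sites, which hold either rcu_read_lock() or tasklist_lock across the walk):

	/* Old pattern: open-coded macro pair walking every thread of every process. */
	rcu_read_lock();
	do_each_thread(g, p) {
		touch_nmi_watchdog();
		sched_show_task(p);
	} while_each_thread(g, p);
	rcu_read_unlock();

	/* New pattern: single iterator macro, same traversal and same
	 * locking requirement (RCU or tasklist_lock held by the caller). */
	rcu_read_lock();
	for_each_process_thread(g, p) {
		touch_nmi_watchdog();
		sched_show_task(p);
	}
	rcu_read_unlock();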

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Frank Mayhar <fmayhar@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Sanjay Rao <srao@redhat.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140813191953.GA19315@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 5d07f4202c
parent 83d7f24247

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4505,7 +4505,7 @@ void show_state_filter(unsigned long state_filter)
 		"  task                        PC stack   pid father\n");
 #endif
 	rcu_read_lock();
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
 		 * console might take a lot of time:
@@ -4513,7 +4513,7 @@ void show_state_filter(unsigned long state_filter)
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
 			sched_show_task(p);
-	} while_each_thread(g, p);
+	}
 
 	touch_all_softlockup_watchdogs();
 
@@ -7137,7 +7137,7 @@ void normalize_rt_tasks(void)
 	struct rq *rq;
 
 	read_lock_irqsave(&tasklist_lock, flags);
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		/*
 		 * Only normalize user tasks:
 		 */
@@ -7168,8 +7168,7 @@ void normalize_rt_tasks(void)
 
 		__task_rq_unlock(rq);
 		raw_spin_unlock(&p->pi_lock);
-	} while_each_thread(g, p);
-
+	}
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
@@ -7357,10 +7356,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
-	do_each_thread(g, p) {
+	for_each_process_thread(g, p) {
 		if (rt_task(p) && task_rq(p)->rt.tg == tg)
 			return 1;
-	} while_each_thread(g, p);
+	}
 
 	return 0;
 }