[PATCH] sched: consider migration thread with smp nice

The intermittent scheduling of the migration thread at ultra high priority
makes the smp nice handling see that runqueue as heavily loaded.  The
migration thread itself actually handles the balancing, so its influence on
priority balancing should be ignored.
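
For illustration only (this sketch is not part of the patch): with the 2.6-era
priority scale, where MAX_PRIO is 140 and the migration thread runs as a
real-time task near priority 0, each runnable task adds MAX_PRIO - prio to the
runqueue's prio_bias, so a brief wakeup of the migration thread contributes
roughly 140 while an ordinary nice-0 task contributes about 20.  The
standalone, hypothetical program below (the helper name prio_bias_contribution
is not from the kernel) simply prints that skew:

#include <stdio.h>

#define MAX_RT_PRIO     100
#define MAX_PRIO        140     /* 100 real-time levels plus 40 nice levels */

/* Same weighting the patch uses: a lower prio value means a bigger bias. */
static int prio_bias_contribution(int prio)
{
        return MAX_PRIO - prio;
}

int main(void)
{
        int migration_thread_prio = 0;          /* near the top of the RT range */
        int nice0_task_prio = MAX_RT_PRIO + 20; /* nice 0 maps to static_prio 120 */

        printf("migration thread adds %d to prio_bias\n",
               prio_bias_contribution(migration_thread_prio));
        printf("nice-0 task adds %d to prio_bias\n",
               prio_bias_contribution(nice0_task_prio));
        return 0;
}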

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ede3d0fba9
parent 6dd4a85bb3
Author: Con Kolivas <kernel@kolivas.org>
Date:   2005-11-08 21:39:00 -08:00
Committed by: Linus Torvalds


@@ -670,6 +670,31 @@ static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
         rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+        rq->nr_running++;
+        if (rt_task(p)) {
+                if (p != rq->migration_thread)
+                        /*
+                         * The migration thread does the actual balancing. Do
+                         * not bias by its priority as the ultra high priority
+                         * will skew balancing adversely.
+                         */
+                        inc_prio_bias(rq, p->prio);
+        } else
+                inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+        rq->nr_running--;
+        if (rt_task(p)) {
+                if (p != rq->migration_thread)
+                        dec_prio_bias(rq, p->prio);
+        } else
+                dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -678,25 +703,17 @@ static inline void inc_prio_bias(runqueue_t *rq, int prio)
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
         rq->nr_running++;
-        if (rt_task(p))
-                inc_prio_bias(rq, p->prio);
-        else
-                inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
         rq->nr_running--;
-        if (rt_task(p))
-                dec_prio_bias(rq, p->prio);
-        else
-                dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.