sched: cleanup: refactor normalize_rt_tasks

Replace a particularly ugly #ifdef with an inline helper and a new macro.
Also split the function up to make it easier to read.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
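
For readers less familiar with the pattern the diff below applies, here is a
minimal, self-contained user-space sketch (not kernel code; all names such as
FEATURE_SMP, is_special() and normalize_item() are invented for illustration)
of replacing an #ifdef-guarded goto inside a loop with a predicate macro that
evaluates to 0 when the config option is off, plus a small helper function
that carries the real work:

    #include <stdio.h>

    /* Stand-in for a kernel config option such as CONFIG_SMP. */
    #ifdef FEATURE_SMP
    #define is_special(i)  ((i) == 0)  /* skip item 0 when the feature is enabled */
    #else
    #define is_special(i)  0           /* constant 0: the test is compiled away */
    #endif

    /* The extracted helper: the work that used to sit inline in the loop. */
    static void normalize_item(int i)
    {
            printf("normalizing item %d\n", i);
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++) {
                    /* No #ifdef or goto in the loop body: the predicate is
                     * valid C in both configurations. */
                    if (!is_special(i))
                            normalize_item(i);
            }
            return 0;
    }

Because the predicate is a plain expression in both configurations, the loop
stays readable and the compiler drops the dead test in the disabled build,
which is the effect is_migration_thread() and normalize_task() have in the
hunks below.
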
@@ -75,6 +75,12 @@ unsigned long long __attribute__((weak)) sched_clock(void)
         return (unsigned long long)jiffies * (1000000000 / HZ);
 }
 
+#ifdef CONFIG_SMP
+#define is_migration_thread(p, rq) ((p) == (rq)->migration_thread)
+#else
+#define is_migration_thread(p, rq) 0
+#endif
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -6532,12 +6538,25 @@ EXPORT_SYMBOL(__might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
+static void normalize_task(struct rq *rq, struct task_struct *p)
+{
+        int on_rq;
+        update_rq_clock(rq);
+        on_rq = p->se.on_rq;
+        if (on_rq)
+                deactivate_task(rq, p, 0);
+        __setscheduler(rq, p, SCHED_NORMAL, 0);
+        if (on_rq) {
+                activate_task(rq, p, 0);
+                resched_task(rq->curr);
+        }
+}
+
 void normalize_rt_tasks(void)
 {
         struct task_struct *g, *p;
         unsigned long flags;
         struct rq *rq;
-        int on_rq;
 
         read_lock_irq(&tasklist_lock);
         do_each_thread(g, p) {
@@ -6561,26 +6580,10 @@ void normalize_rt_tasks(void)
 
                 spin_lock_irqsave(&p->pi_lock, flags);
                 rq = __task_rq_lock(p);
-#ifdef CONFIG_SMP
-                /*
-                 * Do not touch the migration thread:
-                 */
-                if (p == rq->migration_thread)
-                        goto out_unlock;
-#endif
 
-                update_rq_clock(rq);
-                on_rq = p->se.on_rq;
-                if (on_rq)
-                        deactivate_task(rq, p, 0);
-                __setscheduler(rq, p, SCHED_NORMAL, 0);
-                if (on_rq) {
-                        activate_task(rq, p, 0);
-                        resched_task(rq->curr);
-                }
-#ifdef CONFIG_SMP
-out_unlock:
-#endif
+                if (!is_migration_thread(p, rq))
+                        normalize_task(rq, p);
+
                 __task_rq_unlock(rq);
                 spin_unlock_irqrestore(&p->pi_lock, flags);
         } while_each_thread(g, p);