sched: Change the sched_class::set_cpus_allowed() calling context
Change the calling context of sched_class::set_cpus_allowed() such that we can assume the task is inactive. This allows us to easily make changes that affect accounting done by enqueue/dequeue. This does in fact completely remove set_cpus_allowed_rt() and greatly reduces set_cpus_allowed_dl().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dedekind1@gmail.com
Cc: juri.lelli@arm.com
Cc: mgorman@suse.de
Cc: riel@redhat.com
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20150515154833.667516139@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent c5b2803840
commit 6c37067e27
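For reference, the set_cpus_allowed_common() helper that the deadline and realtime hunks below fall back to was introduced by the parent commit (c5b2803840). Its body at the time was roughly the following; it is reproduced here from the surrounding kernel tree as context and is not part of this diff:

void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	/* No per-class accounting is needed here: do_set_cpus_allowed()
	 * (below) dequeues a queued task before calling into the class,
	 * so updating the mask and the CPU count is sufficient. */
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}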
kernel/sched/core.c
@@ -1163,8 +1163,31 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+	struct rq *rq = task_rq(p);
+	bool queued, running;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
+
+	if (queued) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(&rq->lock);
+		dequeue_task(rq, p, 0);
+	}
+	if (running)
+		put_prev_task(rq, p);
+
 	p->sched_class->set_cpus_allowed(p, new_mask);
+
+	if (running)
+		p->sched_class->set_curr_task(rq);
+	if (queued)
+		enqueue_task(rq, p, 0);
 }

 /*
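The two lockdep assertions above encode the new calling convention: p->pi_lock must always be held, and rq->lock additionally whenever the task can be queued (__kthread_bind() is the one caller that operates on blocked tasks without taking rq->lock). A minimal sketch of a conforming caller, using the task_rq_lock()/task_rq_unlock() helpers local to core.c in this era, might look as follows; the function name is hypothetical and the snippet is illustrative only, not part of the patch:

/* Hypothetical caller: task_rq_lock() takes both p->pi_lock and rq->lock,
 * satisfying the assertions in do_set_cpus_allowed() in all cases. */
static void set_affinity_sketch(struct task_struct *p,
				const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	do_set_cpus_allowed(p, new_mask);
	task_rq_unlock(rq, p, &flags);
}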
kernel/sched/deadline.c
@@ -1668,9 +1668,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 static void set_cpus_allowed_dl(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	struct rq *rq;
 	struct root_domain *src_rd;
-	int weight;
+	struct rq *rq;

 	BUG_ON(!dl_task(p));

@@ -1696,41 +1695,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}

-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		goto done;
-
-	/*
-	 * Update only if the task is actually running (i.e.,
-	 * it is on the rq AND it is not throttled).
-	 */
-	if (!on_dl_rq(&p->dl))
-		goto done;
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_dl_task(rq, p);
-		BUG_ON(!rq->dl.dl_nr_migratory);
-		rq->dl.dl_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_dl_task(rq, p);
-		rq->dl.dl_nr_migratory++;
-	}
-
-	update_dl_migration(&rq->dl);
-
-done:
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = weight;
+	set_cpus_allowed_common(p, new_mask);
 }

 /* Assumes rq->lock is held */
kernel/sched/rt.c
@@ -2076,49 +2076,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }

-static void set_cpus_allowed_rt(struct task_struct *p,
-				const struct cpumask *new_mask)
-{
-	struct rq *rq;
-	int weight;
-
-	BUG_ON(!rt_task(p));
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		goto done;
-
-	if (!task_on_rq_queued(p))
-		goto done;
-
-	rq = task_rq(p);
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_task(rq, p);
-		BUG_ON(!rq->rt.rt_nr_migratory);
-		rq->rt.rt_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
-		rq->rt.rt_nr_migratory++;
-	}
-
-	update_rt_migration(&rq->rt);
-
-done:
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = weight;
-}
-
 /* Assumes rq->lock is held */
 static void rq_online_rt(struct rq *rq)
 {
@@ -2327,7 +2284,7 @@ const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,

-	.set_cpus_allowed       = set_cpus_allowed_rt,
+	.set_cpus_allowed       = set_cpus_allowed_common,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.task_woken		= task_woken_rt,
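With set_cpus_allowed_rt() removed, rt_nr_migratory is maintained entirely by the enqueue/dequeue paths, which see the task's already-updated p->nr_cpus_allowed once do_set_cpus_allowed() re-enqueues it. A paraphrased sketch of that inc-side accounting follows; the real code is inc_rt_migration() in kernel/sched/rt.c, and the helper name here is invented for illustration:

/* Paraphrased sketch: runs from the RT enqueue path, after
 * set_cpus_allowed_common() has already updated nr_cpus_allowed. */
static void inc_rt_migration_sketch(struct task_struct *p, struct rt_rq *rt_rq)
{
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}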