sched: Add wrapper for checking task_struct::on_rq
Implement task_on_rq_queued() and use it everywhere instead of the open-coded
on_rq check. No functional changes.

The only exception is check_for_tasks(), where we do not use the wrapper
because that would require exporting task_on_rq_queued() in global header
files. The next patch in the series brings it back, so there is no point
shuffling it from here to there in the meantime.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit da0c1e65b5 (parent f36c019c79)
committed by Ingo Molnar
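The hunks below all come from kernel/sched/rt.c; the wrapper itself is added to the scheduler's private header and is not shown in this excerpt. Since the patch is explicitly a no-functional-change cleanup, the helper can only be a thin predicate around task_struct::on_rq, roughly along the following lines (a sketch of the likely definition, not a hunk copied from the patch):

  /*
   * Sketch only: the actual definition lands in kernel/sched/sched.h.
   * Wrapping the raw ->on_rq test lets later patches redefine what
   * "queued" means without touching every call site again.
   */
  static inline int task_on_rq_queued(struct task_struct *p)
  {
  	return p->on_rq;
  }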
@@ -1448,7 +1448,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		 * means a dl or stop task can slip in, in which case we need
 		 * to re-start task selection.
 		 */
-		if (unlikely((rq->stop && rq->stop->on_rq) ||
+		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
 			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
@@ -1624,7 +1624,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
-				     !task->on_rq)) {
+				     !task_on_rq_queued(task))) {
 
 				double_unlock_balance(rq, lowest_rq);
 				lowest_rq = NULL;
@@ -1658,7 +1658,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
 
 	return p;
@@ -1809,7 +1809,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));
 
 			/*
 			 * There's a chance that p is higher in priority
@@ -1870,7 +1870,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 
 	BUG_ON(!rt_task(p));
 
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 
 	weight = cpumask_weight(new_mask);
@@ -1936,7 +1936,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!p->on_rq || rq->rt.rt_nr_running)
+	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 
 	if (pull_rt_task(rq))
@@ -1970,7 +1970,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
 		    /* Don't resched if we changed runqueues */
@@ -1989,7 +1989,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		return;
 
 	if (rq->curr == p) {