mirror of
https://github.com/torvalds/linux.git
synced 2024-11-15 16:41:58 +00:00
sched,dl: Remove return value from pull_dl_task()
In order to be able to use pull_dl_task() from a callback, we need to do away with the return value. Since the return value indicates if we should reschedule, do this inside the function. Since not all callers currently do this, this can increase the number of reschedules due to rt balancing. Too many reschedules is not a correctness issue, too few are. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: ktkhai@parallels.com Cc: rostedt@goodmis.org Cc: juri.lelli@gmail.com Cc: pang.xunlei@linaro.org Cc: oleg@redhat.com Cc: wanpeng.li@linux.intel.com Cc: umgwanakikbuti@gmail.com Link: http://lkml.kernel.org/r/20150611124742.859398977@infradead.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
fd7a4bed18
commit
0ea60c2054
@ -298,9 +298,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int pull_dl_task(struct rq *rq)
|
static inline void pull_dl_task(struct rq *rq)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void queue_push_tasks(struct rq *rq)
|
static inline void queue_push_tasks(struct rq *rq)
|
||||||
@ -1041,7 +1040,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
|
|||||||
resched_curr(rq);
|
resched_curr(rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pull_dl_task(struct rq *this_rq);
|
static void pull_dl_task(struct rq *this_rq);
|
||||||
|
|
||||||
#endif /* CONFIG_SMP */
|
#endif /* CONFIG_SMP */
|
||||||
|
|
||||||
@ -1472,15 +1471,16 @@ static void push_dl_tasks(struct rq *rq)
|
|||||||
;
|
;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pull_dl_task(struct rq *this_rq)
|
static void pull_dl_task(struct rq *this_rq)
|
||||||
{
|
{
|
||||||
int this_cpu = this_rq->cpu, ret = 0, cpu;
|
int this_cpu = this_rq->cpu, cpu;
|
||||||
struct task_struct *p;
|
struct task_struct *p;
|
||||||
|
bool resched = false;
|
||||||
struct rq *src_rq;
|
struct rq *src_rq;
|
||||||
u64 dmin = LONG_MAX;
|
u64 dmin = LONG_MAX;
|
||||||
|
|
||||||
if (likely(!dl_overloaded(this_rq)))
|
if (likely(!dl_overloaded(this_rq)))
|
||||||
return 0;
|
return;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Match the barrier from dl_set_overloaded; this guarantees that if we
|
* Match the barrier from dl_set_overloaded; this guarantees that if we
|
||||||
@ -1535,7 +1535,7 @@ static int pull_dl_task(struct rq *this_rq)
|
|||||||
src_rq->curr->dl.deadline))
|
src_rq->curr->dl.deadline))
|
||||||
goto skip;
|
goto skip;
|
||||||
|
|
||||||
ret = 1;
|
resched = true;
|
||||||
|
|
||||||
deactivate_task(src_rq, p, 0);
|
deactivate_task(src_rq, p, 0);
|
||||||
set_task_cpu(p, this_cpu);
|
set_task_cpu(p, this_cpu);
|
||||||
@ -1548,7 +1548,8 @@ skip:
|
|||||||
double_unlock_balance(this_rq, src_rq);
|
double_unlock_balance(this_rq, src_rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
if (resched)
|
||||||
|
resched_curr(this_rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1704,8 +1705,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
|
|||||||
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
|
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (pull_dl_task(rq))
|
pull_dl_task(rq);
|
||||||
resched_curr(rq);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
Loading…
Reference in New Issue
Block a user