sched: Fix cross-sched-class wakeup preemption
Instead of dealing with sched classes inside each check_preempt_curr()
implementation, pull out this logic into the generic wakeup preemption
path.

This fixes a hang in KVM (and others) where we are waiting for the
stop machine thread to run ...

Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Tested-by: Marcelo Tosatti <mtosatti@redhat.com>
Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1288891946.2039.31.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2d46709082
commit 1e5a74059f
kernel/sched.c
@@ -560,18 +560,8 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-
-	/*
-	 * A queue event has occurred, and we're going to schedule. In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
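The inline body becomes a forward declaration here: the new implementation (next hunk) walks the class list with for_each_class(), which is defined further down in kernel/sched.c, below the sched class setup, so the function has to move past that point.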
kernel/sched.c
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class) {
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	} else {
+		for_each_class(class) {
+			if (class == rq->curr->sched_class)
+				break;
+			if (class == p->sched_class) {
+				resched_task(rq->curr);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule. In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
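The key invariant is that for_each_class() visits the sched classes in descending priority order (stop first, then rt, fair, idle), so whichever of the two classes the loop reaches first outranks the other. Below is a minimal user-space sketch of that decision, assuming a simplified three-class list; toy_class, cross_class_preempt() and the hardcoded ordering are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for struct sched_class: a list ordered high -> low priority. */
struct toy_class {
	const char *name;
	const struct toy_class *next;	/* next lower-priority class */
};

static const struct toy_class fair = { "fair", NULL };
static const struct toy_class rt   = { "rt",   &fair };
static const struct toy_class stop = { "stop", &rt };

/* Mirrors for_each_class(): walk from the highest-priority class down. */
#define for_each_toy_class(class) \
	for (class = &stop; class; class = class->next)

/*
 * Mirrors the else-branch in the hunk above: the waking task preempts
 * the current task only if the walk reaches the waking task's class
 * strictly before the current task's class.
 */
static bool cross_class_preempt(const struct toy_class *curr_class,
				const struct toy_class *p_class)
{
	const struct toy_class *class;

	for_each_toy_class(class) {
		if (class == curr_class)
			return false;	/* curr outranks p: no preemption */
		if (class == p_class)
			return true;	/* p outranks curr: reschedule */
	}
	return false;
}

int main(void)
{
	printf("%s wakes over %s -> preempt=%d\n", stop.name, fair.name,
	       cross_class_preempt(&fair, &stop));	/* prints 1 */
	printf("%s wakes over %s -> preempt=%d\n", fair.name, rt.name,
	       cross_class_preempt(&rt, &fair));	/* prints 0 */
	return 0;
}

Because the same-class case is dispatched to the class's own check_preempt_curr hook before the walk, the loop only ever has to decide strict cross-class cases.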
kernel/sched_fair.c
@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio)))
-		goto preempt;
-
-	if (unlikely(p->sched_class != &fair_sched_class))
-		return;
-
 	if (unlikely(se == pse))
 		return;
 
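With the cross-class decision made in the generic path, check_preempt_wakeup() is only reached when the waking task is itself a fair-class task, so the rt_prio() and sched_class guards become dead code and are dropped.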
kernel/sched_stoptask.c
@@ -19,7 +19,7 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	resched_task(rq->curr); /* we preempt everything */
+	/* we're never preempted */
 }
 
 static struct task_struct *pick_next_task_stop(struct rq *rq)
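check_preempt_curr_stop() runs when some task wakes while a stop task is current. Since the stop class sits at the head of the class list and outranks everything, the generic walk above always breaks out at the current task's class first: a running stop task is never preempted. The unconditional resched_task() (and its backwards "we preempt everything" comment) therefore give way to an empty stub.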