forked from Minki/linux
sched: Move sched_avg_update() to update_cpu_load()
Currently sched_avg_update() (which updates the rt_avg stats in the rq) is called from scale_rt_power() (in the load-balance context), which does not take rq->lock. Fix this by moving sched_avg_update() to the more appropriate update_cpu_load(), where the CFS load gets updated as well and rq->lock is already held.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1282596171.2694.3.camel@sbsiddha-MOBL3>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d56557af19
commit da2b71edd8
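To make the locking rationale concrete, below is a minimal userspace C sketch of the pattern, not the kernel code: the struct layout, the 1-second period, the cpu_load decay step, the 1024 scale factor, and the pthread mutex standing in for rq->lock are all simplified assumptions. It shows why the move is safe: sched_avg_update() mutates rq state (rt_avg, age_stamp), so it belongs in the tick-time update_cpu_load() path that already runs under the lock, while the load-balance-side scale_rt_power() only reads.

/*
 * Userspace sketch only; simplified stand-in for the kernel structures
 * touched by this patch, with assumed constants.
 */
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define SCHED_AVG_PERIOD_NS 1000000000ULL	/* assumed 1s averaging period */

struct rq {
	pthread_mutex_t lock;	/* stand-in for rq->lock */
	uint64_t clock;		/* current time, ns */
	uint64_t age_stamp;	/* start of the current averaging period */
	uint64_t rt_avg;	/* decayed RT runtime in the period */
	uint64_t cpu_load[5];	/* CFS load history, updated under the lock */
};

/* Decays rt_avg; mutates rq state, so the caller must hold rq->lock. */
static void sched_avg_update(struct rq *rq)
{
	while (rq->clock - rq->age_stamp > SCHED_AVG_PERIOD_NS) {
		rq->age_stamp += SCHED_AVG_PERIOD_NS;
		rq->rt_avg /= 2;
	}
}

/* Tick-time load update: already runs with rq->lock held. */
static void update_cpu_load(struct rq *rq)
{
	for (int i = 0; i < 5; i++)		/* decay the CFS load history */
		rq->cpu_load[i] -= rq->cpu_load[i] >> (i + 1);
	sched_avg_update(rq);			/* the call this patch moves here */
}

/* Load-balance path: after the patch it only reads, never writes. */
static uint64_t scale_rt_power(const struct rq *rq)
{
	uint64_t total = SCHED_AVG_PERIOD_NS + (rq->clock - rq->age_stamp);
	uint64_t available = total - rq->rt_avg;

	return (available * 1024) / total;	/* share of CPU power left for CFS */
}

int main(void)
{
	struct rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .clock = 3 * SCHED_AVG_PERIOD_NS,
			 .rt_avg = SCHED_AVG_PERIOD_NS / 4 };

	pthread_mutex_lock(&rq.lock);
	update_cpu_load(&rq);			/* write side, under the lock */
	pthread_mutex_unlock(&rq.lock);

	printf("scale_rt_power = %llu/1024\n",
	       (unsigned long long)scale_rt_power(&rq));
	return 0;
}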
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
 
 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
 	}
+
+	sched_avg_update(this_rq);
 }
 
 static void update_cpu_load_active(struct rq *this_rq)
@@ -2268,8 +2268,6 @@ unsigned long scale_rt_power(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	u64 total, available;
 
-	sched_avg_update(rq);
-
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 	available = total - rq->rt_avg;
 