sched: fix accounting in task delay accounting & migration
On Thu, Jun 19, 2008 at 12:27:14PM +0200, Peter Zijlstra wrote:

> On Thu, 2008-06-05 at 10:50 +0530, Ankita Garg wrote:
>
> > Thanks Peter for the explanation...
> >
> > I agree with the above and that is the reason why I did not see weird
> > values with cpu_time. But run_delay still would suffer skews, as the
> > end points for the delta could be taken on different cpus due to
> > migration (more so on the RT kernel due to the push-pull operations).
> > With the below patch, I could not reproduce the issue I had seen
> > earlier. After every dequeue, we take the delta and start wait
> > measurements from zero when moved to a different rq.
>
> OK, so task delay accounting is broken because it doesn't take
> migration into account.
>
> What you've done is make it symmetric wrt enqueue, and account it like
>
>     cpu0            cpu1
>
>     enqueue
>     <wait-d1>
>     dequeue
>                     enqueue
>                     <wait-d2>
>                     run
>
> Where you add both d1 and d2 to the run_delay, right?

Thanks for reviewing the patch. The above is exactly what I have done.

> This seems like a good fix. However, it looks like the patch will break
> compilation with !CONFIG_SCHEDSTATS && !CONFIG_TASK_DELAY_ACCT, by
> failing to provide a stub for sched_info_dequeued() in that case.

Fixed. Please find the new patch below.

Signed-off-by: Ankita Garg <ankita@in.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Gregory Haskins <ghaskins@novell.com>
Cc: rostedt@goodmis.org
Cc: suresh.b.siddha@intel.com
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: dhaval@linux.vnet.ibm.com
Cc: vatsa@linux.vnet.ibm.com
Cc: David Bahi <DBahi@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 2087a1ad82
commit 46ac22bab4
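For context (not part of the patch): the run_delay being fixed here is the per-task run queue wait time that delay accounting exports. On kernels with CONFIG_SCHEDSTATS enabled it is readable as the second field of /proc/<pid>/schedstat; a minimal user-space reader, assuming the documented three-field format, might look like this:

	/* Sketch of a /proc/<pid>/schedstat reader -- not part of the patch.
	 * Assumes the documented three fields: time on cpu (ns),
	 * run queue wait time (ns), number of timeslices run. */
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64];
		unsigned long long on_cpu, run_delay, pcount;
		FILE *f;

		/* read our own stats unless a pid is given on the command line */
		snprintf(path, sizeof(path), "/proc/%s/schedstat",
			 argc > 1 ? argv[1] : "self");
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%llu %llu %llu", &on_cpu, &run_delay, &pcount) != 3) {
			fprintf(stderr, "unexpected format in %s\n", path);
			fclose(f);
			return 1;
		}
		printf("on_cpu=%llu ns  run_delay=%llu ns  pcount=%llu\n",
		       on_cpu, run_delay, pcount);
		fclose(f);
		return 0;
	}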
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1714,6 +1714,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 		p->se.last_wakeup = 0;
 	}
 
+	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
 }
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -118,6 +118,13 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 	if (rq)
 		rq->rq_sched_info.cpu_time += delta;
 }
+
+static inline void
+rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
+{
+	if (rq)
+		rq->rq_sched_info.run_delay += delta;
+}
 # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 # define schedstat_set(var, val)	do { var = (val); } while (0)
@@ -126,6 +133,9 @@ static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 {}
 static inline void
+rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
+{}
+static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
 # define schedstat_inc(rq, field)	do { } while (0)
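The empty inline functions above are the usual config-stub idiom, and the compile break Peter pointed out is what happens when one variant is missing. In isolation the pattern looks like the sketch below (CONFIG_FOO and foo_hook are placeholder names, not from the patch):

	#include <stdio.h>

	/* When the option is off, the hook compiles away to nothing,
	 * so call sites need no #ifdef of their own. */
	#ifdef CONFIG_FOO
	static inline void foo_hook(int cpu)
	{
		printf("accounting on cpu %d\n", cpu);	/* real work when enabled */
	}
	#else
	# define foo_hook(cpu)	do { } while (0)	/* no-op stub when disabled */
	#endif

	int main(void)
	{
		foo_hook(0);	/* builds unchanged whether or not CONFIG_FOO is set */
		return 0;
	}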
@@ -134,6 +144,11 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 #endif
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+static inline void sched_info_reset_dequeued(struct task_struct *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -143,15 +158,22 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
  * active queue, thus delaying tasks in the expired queue from running;
  * see scheduler_tick()).
  *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
+ * Though we are interested in knowing how long it was from the *first* time a
+ * task was queued to the time that it finally hit a cpu, we call this routine
+ * from dequeue_task() to account for possible rq->clock skew across cpus. The
+ * delta taken on each cpu would annul the skew.
  */
 static inline void sched_info_dequeued(struct task_struct *t)
 {
-	t->sched_info.last_queued = 0;
+	unsigned long long now = task_rq(t)->clock, delta = 0;
+
+	if (unlikely(sched_info_on()))
+		if (t->sched_info.last_queued)
+			delta = now - t->sched_info.last_queued;
+	sched_info_reset_dequeued(t);
+	t->sched_info.run_delay += delta;
+
+	rq_sched_info_dequeued(task_rq(t), delta);
 }
 
 /*
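To make the skew argument in the new comment concrete (with made-up numbers): suppose cpu1's rq->clock runs 5 ms ahead of cpu0's, a task is queued on cpu0 at local time 100 ms and dequeued for migration at 103 ms, then re-queued on cpu1 at local time 210 ms and finally picked to run at 214 ms. The old scheme measured one delta across both clocks, so the 5 ms skew leaked into run_delay; the patched scheme takes d1 = 3 ms entirely on cpu0 and d2 = 4 ms entirely on cpu1, so run_delay grows by exactly 7 ms and the skew never enters the sum.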
@@ -165,7 +187,7 @@ static void sched_info_arrive(struct task_struct *t)
 
 	if (t->sched_info.last_queued)
 		delta = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
+	sched_info_reset_dequeued(t);
 	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcount++;
@@ -242,7 +264,9 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 		__sched_info_switch(prev, next);
 }
 #else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_reset_dequeued(t)	do { } while (0)
+#define sched_info_dequeued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
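Taken together, the patched hooks behave like the user-space sketch below, using the same numbers as the worked example above. It is a simplification, not kernel code: sched_info_arrive() also takes a local delta before resetting, which is folded into info_dequeued() here.

	#include <assert.h>
	#include <stdio.h>

	/* Toy model of the patched accounting: every delta is taken against
	 * the clock of the rq the task is currently on, so per-cpu clock
	 * skew never enters run_delay. */
	struct toy_sched_info {
		unsigned long long last_queued;
		unsigned long long run_delay;
	};

	static void info_queued(struct toy_sched_info *t, unsigned long long now)
	{
		if (!t->last_queued)
			t->last_queued = now;
	}

	/* mirrors the patched sched_info_dequeued(): local delta, then reset */
	static void info_dequeued(struct toy_sched_info *t, unsigned long long now)
	{
		if (t->last_queued)
			t->run_delay += now - t->last_queued;
		t->last_queued = 0;
	}

	int main(void)
	{
		struct toy_sched_info t = { 0, 0 };

		info_queued(&t, 100);	/* cpu0: enqueued at local time 100 */
		info_dequeued(&t, 103);	/* cpu0: migrated away, d1 = 3 */
		info_queued(&t, 210);	/* cpu1: re-enqueued on a skewed clock */
		info_dequeued(&t, 214);	/* cpu1: picked to run, d2 = 4 */

		assert(t.run_delay == 7);	/* d1 + d2, independent of the skew */
		printf("run_delay = %llu\n", t.run_delay);
		return 0;
	}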