Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Fix race in cpupri introduced by cpumask_var changes
  sched: Fix latencytop and sleep profiling vs group scheduling
This commit is contained in:
Linus Torvalds 2009-08-04 15:32:22 -07:00
commit ea5634246b
2 changed files with 33 additions and 14 deletions

View File

@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;

-		if (lowest_mask)
+		if (lowest_mask) {
 			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+			/*
+			 * We have to ensure that we have at least one bit
+			 * still set in the array, since the map could have
+			 * been concurrently emptied between the first and
+			 * second reads of vec->mask.  If we hit this
+			 * condition, simply act as though we never hit this
+			 * priority level and continue on.
+			 */
+			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+				continue;
+		}
+
 		return 1;
 	}

View File

@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+	struct task_struct *tsk = NULL;
+
+	if (entity_is_task(se))
+		tsk = task_of(se);
+
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-		struct task_struct *tsk = task_of(se);

 		if ((s64)delta < 0)
 			delta = 0;
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;

-		account_scheduler_latency(tsk, delta >> 10, 1);
+		if (tsk)
+			account_scheduler_latency(tsk, delta >> 10, 1);
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-		struct task_struct *tsk = task_of(se);

 		if ((s64)delta < 0)
 			delta = 0;
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->block_start = 0;
 		se->sum_sleep_runtime += delta;

-		/*
-		 * Blocking time is in units of nanosecs, so shift by 20 to
-		 * get a milliseconds-range estimation of the amount of
-		 * time that the task spent sleeping:
-		 */
-		if (unlikely(prof_on == SLEEP_PROFILING)) {
-			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-				     delta >> 20);
+		if (tsk) {
+			/*
+			 * Blocking time is in units of nanosecs, so shift by
+			 * 20 to get a milliseconds-range estimation of the
+			 * amount of time that the task spent sleeping:
+			 */
+			if (unlikely(prof_on == SLEEP_PROFILING)) {
+				profile_hits(SLEEP_PROFILING,
+						(void *)get_wchan(tsk),
+						delta >> 20);
+			}
+			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
-		account_scheduler_latency(tsk, delta >> 10, 0);
 	}
 #endif
 }