sched/cpufreq: Provide migration hint
It was suggested that a migration hint might be useful for the CPU-freq
governors.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ea14b57e8a (parent 00357f5ec5)
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,6 +9,7 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
+#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
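For context on how the new bit could be consumed: below is a minimal governor-side sketch, not part of this commit. struct update_util_data and cpufreq_add_update_util_hook() are the existing kernel API; the my_gov_* names and the reaction to the hint are hypothetical.

#include <linux/kernel.h>
#include <linux/sched/cpufreq.h>

/* Hypothetical per-CPU governor state wrapping the kernel's hook struct. */
struct my_gov_cpu {
	struct update_util_data update_util;
	u64 last_update;
};

/* Invoked by the scheduler through cpufreq_update_util(rq, flags). */
static void my_gov_update(struct update_util_data *data, u64 time,
			  unsigned int flags)
{
	struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu,
					     update_util);

	if (flags & SCHED_CPUFREQ_MIGRATION) {
		/*
		 * A task was just enqueued here after migrating; its
		 * utilization arrived with it, so a governor may want to
		 * re-evaluate the frequency right away rather than wait
		 * for the next regular update.
		 */
	}

	gc->last_update = time;
}

/* Registered per CPU, e.g. from the governor's start path:
 *	cpufreq_add_update_util_hook(cpu, &gc->update_util, my_gov_update);
 */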
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			 * For !fair tasks do:
 			 *
 			update_cfs_rq_load_avg(now, cfs_rq);
-			attach_entity_load_avg(cfs_rq, se);
+			attach_entity_load_avg(cfs_rq, se, 0);
 			switched_from_fair(rq, p);
 			 *
 			 * such that the next switched_to_fair() has the
@@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq) {
+	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq, 0);
+		cpufreq_update_util(rq, flags);
 	}
 }
 
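For reference, the flags value forwarded above reaches the governor through the scheduler's dispatch helper. This is roughly how cpufreq_update_util() was defined in kernel/sched/sched.h in this era; a simplified sketch, not part of the diff:

static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}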
@@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #endif
 
 	if (decayed)
-		cfs_rq_util_change(cfs_rq);
+		cfs_rq_util_change(cfs_rq, 0);
 
 	return decayed;
 }
@@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, flags);
 }
 
 /**
@@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 /*
@@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
-		attach_entity_load_avg(cfs_rq, se);
+		/*
+		 * DO_ATTACH means we're here from enqueue_entity().
+		 * !last_update_time means we've passed through
+		 * migrate_task_rq_fair() indicating we migrated.
+		 *
+		 * IOW we're enqueueing a task on a new CPU.
+		 */
+		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed && (flags & UPDATE_TG))
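The hunk above is where the hint originates. migrate_task_rq_fair() clears se->avg.last_update_time when a task leaves its old CPU, so the attach on the destination CPU is distinguishable from an ordinary enqueue. Sketched as a call chain (paths paraphrased, not verbatim kernel code):

/*
 * set_task_cpu(p, new_cpu)
 *   migrate_task_rq_fair()                 // se->avg.last_update_time = 0
 * ...
 * enqueue_task_fair()                      // on the destination CPU
 *   enqueue_entity()
 *     update_load_avg(cfs_rq, se, DO_ATTACH)
 *       attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION)
 *         cfs_rq_util_change(cfs_rq, SCHED_CPUFREQ_MIGRATION)
 *           cpufreq_update_util(rq, SCHED_CPUFREQ_MIGRATION)
 */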
@@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }