diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96a81b2fa281..95f7092043f3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9411,7 +9411,7 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-static void update_blocked_averages(int cpu)
+static void sched_balance_update_blocked_averages(int cpu)
 {
 	bool decayed = false, done = true;
 	struct rq *rq = cpu_rq(cpu);
@@ -12079,7 +12079,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
-	update_blocked_averages(cpu);
+	sched_balance_update_blocked_averages(cpu);
 
 	return rq->has_blocked_load;
 }
@@ -12339,7 +12339,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	raw_spin_rq_unlock(this_rq);
 
 	t0 = sched_clock_cpu(this_cpu);
-	update_blocked_averages(this_cpu);
+	sched_balance_update_blocked_averages(this_cpu);
 
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
@@ -12431,7 +12431,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
 		return;
 
 	/* normal load balance */
-	update_blocked_averages(this_rq->cpu);
+	sched_balance_update_blocked_averages(this_rq->cpu);
 	sched_balance_domains(this_rq, idle);
 }
 
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 63b6cf898220..f80955ecdce6 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 	 * This means that weight will be 0 but not running for a sched_entity
 	 * but also for a cfs_rq if the latter becomes idle. As an example,
 	 * this happens during idle_balance() which calls
-	 * update_blocked_averages().
+	 * sched_balance_update_blocked_averages().
 	 *
 	 * Also see the comment in accumulate_sum().
 	 */