sched/balancing: Simplify the sg_status bitmask and use separate ->overloaded and ->overutilized flags
The SG_OVERLOADED and SG_OVERUTILIZED flags plus the sg_status bitmask are an
unnecessary complication that only makes the code harder to read and slower.

We only ever set them separately:

  thule:~/tip> git grep SG_OVER kernel/sched/
  kernel/sched/fair.c:            set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
  kernel/sched/fair.c:                    *sg_status |= SG_OVERLOADED;
  kernel/sched/fair.c:                    *sg_status |= SG_OVERUTILIZED;
  kernel/sched/fair.c:                            *sg_status |= SG_OVERLOADED;
  kernel/sched/fair.c:            set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);
  kernel/sched/fair.c:                                       sg_status & SG_OVERUTILIZED);
  kernel/sched/fair.c:    } else if (sg_status & SG_OVERUTILIZED) {
  kernel/sched/fair.c:            set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
  kernel/sched/sched.h:#define SG_OVERLOADED              0x1 /* More than one runnable task on a CPU. */
  kernel/sched/sched.h:#define SG_OVERUTILIZED            0x2 /* One or more CPUs are over-utilized. */
  kernel/sched/sched.h:            set_rd_overloaded(rq->rd, SG_OVERLOADED);

And use them separately, which results in suboptimal code:

  /* update overload indicator if we are at root domain */
  set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);

  /* Update over-utilization (tipping point, U >= 0) indicator */
  set_rd_overutilized_status(env->dst_rq->rd,
                             sg_status & SG_OVERUTILIZED);

Introduce separate sg_overloaded and sg_overutilized flags in
update_sd_lb_stats() and its lower level functions, and change all of
them to 'bool'.

Remove the now unused SG_OVERLOADED and SG_OVERUTILIZED flags.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Qais Yousef <qyousef@layalina.io>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/ZgVPhODZ8/nbsqbP@gmail.com
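To make the cleanup concrete, here is a minimal user-space sketch of the difference between the old bitmask style and the new separate-bool style. It is illustrative only, not kernel code; scan_old()/scan_new() are hypothetical names standing in for update_sg_lb_stats():

  #include <stdbool.h>
  #include <stdio.h>

  /* Old style: one int carries two orthogonal facts behind flag macros. */
  #define SG_OVERLOADED   0x1
  #define SG_OVERUTILIZED 0x2

  static void scan_old(int *sg_status)
  {
          *sg_status |= SG_OVERLOADED;    /* callers must mask the bit out again */
  }

  /* New style: each fact is its own bool out-parameter; no masking needed. */
  static void scan_new(bool *sg_overloaded, bool *sg_overutilized)
  {
          *sg_overloaded = true;
          (void)sg_overutilized;          /* left untouched unless its condition hits */
  }

  int main(void)
  {
          int sg_status = 0;
          bool overloaded = false, overutilized = false;

          scan_old(&sg_status);
          scan_new(&overloaded, &overutilized);

          /* Old style forces a mask at every use site: */
          printf("old: overloaded=%d\n", !!(sg_status & SG_OVERLOADED));
          /* New style reads directly: */
          printf("new: overloaded=%d overutilized=%d\n", overloaded, overutilized);
          return 0;
  }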
commit 4475cd8bfd
parent 4d0a63e5b8
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ -6688,19 +6688,18 @@ static inline bool cpu_overutilized(int cpu)
 /*
  * overutilized value make sense only if EAS is enabled
  */
-static inline int is_rd_overutilized(struct root_domain *rd)
+static inline bool is_rd_overutilized(struct root_domain *rd)
 {
 	return !sched_energy_enabled() || READ_ONCE(rd->overutilized);
 }
 
-static inline void set_rd_overutilized(struct root_domain *rd,
-				       unsigned int status)
+static inline void set_rd_overutilized(struct root_domain *rd, bool flag)
 {
 	if (!sched_energy_enabled())
 		return;
 
-	WRITE_ONCE(rd->overutilized, status);
-	trace_sched_overutilized_tp(rd, !!status);
+	WRITE_ONCE(rd->overutilized, flag);
+	trace_sched_overutilized_tp(rd, flag);
 }
 
 static inline void check_update_overutilized_status(struct rq *rq)
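For context outside the kernel tree: these accessors lean on READ_ONCE()/WRITE_ONCE() so the flag can be read and updated concurrently without a lock. Below is a rough user-space approximation of that idiom, a simplified sketch using GNU-C typeof (the kernel's real macros do additional checking, and the energy/tracing logic is omitted):

  #include <stdbool.h>
  #include <stdio.h>

  /*
   * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE():
   * the volatile cast forces exactly one load/store and keeps the
   * compiler from caching or re-reading the flag.
   */
  #define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

  struct root_domain {
          bool overutilized;	/* a plain bool after this commit, not a bitmask slot */
  };

  static void set_rd_overutilized(struct root_domain *rd, bool flag)
  {
          WRITE_ONCE(rd->overutilized, flag);	/* sched_energy_enabled() check omitted */
  }

  static bool is_rd_overutilized(struct root_domain *rd)
  {
          return READ_ONCE(rd->overutilized);
  }

  int main(void)
  {
          struct root_domain rd = { .overutilized = false };

          set_rd_overutilized(&rd, true);
          printf("overutilized=%d\n", is_rd_overutilized(&rd));
          return 0;
  }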
@@ -6711,7 +6710,7 @@ static inline void check_update_overutilized_status(struct rq *rq)
 	 */
 
 	if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
-		set_rd_overutilized(rq->rd, SG_OVERUTILIZED);
+		set_rd_overutilized(rq->rd, 1);
 }
 #else
 static inline void check_update_overutilized_status(struct rq *rq) { }
@@ -9934,13 +9933,15 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
  * @sds: Load-balancing data with statistics of the local group.
  * @group: sched_group whose statistics are to be updated.
  * @sgs: variable to hold the statistics for this group.
- * @sg_status: Holds flag indicating the status of the sched_group
+ * @sg_overloaded: sched_group is overloaded
+ * @sg_overutilized: sched_group is overutilized
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 				      struct sd_lb_stats *sds,
 				      struct sched_group *group,
 				      struct sg_lb_stats *sgs,
-				      int *sg_status)
+				      bool *sg_overloaded,
+				      bool *sg_overutilized)
 {
 	int i, nr_running, local_group;
 
@@ -9961,10 +9962,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->sum_nr_running += nr_running;
 
 		if (nr_running > 1)
-			*sg_status |= SG_OVERLOADED;
+			*sg_overloaded = 1;
 
 		if (cpu_overutilized(i))
-			*sg_status |= SG_OVERUTILIZED;
+			*sg_overutilized = 1;
 
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
@@ -9986,7 +9987,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			/* Check for a misfit task on the cpu */
 			if (sgs->group_misfit_task_load < rq->misfit_task_load) {
 				sgs->group_misfit_task_load = rq->misfit_task_load;
-				*sg_status |= SG_OVERLOADED;
+				*sg_overloaded = 1;
 			}
 		} else if (env->idle && sched_reduced_capacity(rq, env->sd)) {
 			/* Check for a task running on a CPU with reduced capacity */
@@ -10612,7 +10613,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
 	unsigned long sum_util = 0;
-	int sg_status = 0;
+	bool sg_overloaded = 0, sg_overutilized = 0;
 
 	do {
 		struct sg_lb_stats *sgs = &tmp_sgs;
@@ -10628,7 +10629,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 			update_group_capacity(env->sd, env->dst_cpu);
 		}
 
-		update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
+		update_sg_lb_stats(env, sds, sg, sgs, &sg_overloaded, &sg_overutilized);
 
 		if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
@@ -10657,13 +10658,12 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 
 	if (!env->sd->parent) {
 		/* update overload indicator if we are at root domain */
-		set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);
+		set_rd_overloaded(env->dst_rq->rd, sg_overloaded);
 
 		/* Update over-utilization (tipping point, U >= 0) indicator */
-		set_rd_overutilized(env->dst_rq->rd,
-				    sg_status & SG_OVERUTILIZED);
-	} else if (sg_status & SG_OVERUTILIZED) {
-		set_rd_overutilized(env->dst_rq->rd, SG_OVERUTILIZED);
+		set_rd_overutilized(env->dst_rq->rd, sg_overloaded);
+	} else if (sg_overutilized) {
+		set_rd_overutilized(env->dst_rq->rd, sg_overutilized);
 	}
 
 	update_idle_cpu_scan(env, sum_util);
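Taken together, the fair.c changes make update_sd_lb_stats() accumulate two independent bools across groups and publish them once at the root domain. A toy re-creation of that accumulation flow follows; struct cpu_stat and scan_group() are hypothetical stand-ins, not the kernel's actual structures:

  #include <stdbool.h>
  #include <stdio.h>

  /* Toy per-CPU snapshot standing in for the relevant struct rq fields. */
  struct cpu_stat {
          int  nr_running;
          bool overutilized;
  };

  /* Mirrors the update_sg_lb_stats() pattern: the scan only ever sets the flags. */
  static void scan_group(const struct cpu_stat *cpus, int n,
                         bool *sg_overloaded, bool *sg_overutilized)
  {
          for (int i = 0; i < n; i++) {
                  if (cpus[i].nr_running > 1)
                          *sg_overloaded = true;
                  if (cpus[i].overutilized)
                          *sg_overutilized = true;
          }
  }

  int main(void)
  {
          struct cpu_stat cpus[] = {
                  { .nr_running = 1, .overutilized = false },
                  { .nr_running = 3, .overutilized = true  },
          };
          bool sg_overloaded = false, sg_overutilized = false;

          scan_group(cpus, 2, &sg_overloaded, &sg_overutilized);

          /* The accumulated bools are published as-is, where the old code
           * had to mask at every use site: sg_status & SG_OVERLOADED.  */
          printf("overloaded=%d overutilized=%d\n",
                 sg_overloaded, sg_overutilized);
          return 0;
  }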
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ -713,7 +713,7 @@ struct rt_rq {
 	} highest_prio;
 #endif
 #ifdef CONFIG_SMP
-	int			overloaded;
+	bool			overloaded;
 	struct plist_head	pushable_tasks;
 
 #endif /* CONFIG_SMP */
@@ -757,7 +757,7 @@ struct dl_rq {
 		u64		next;
 	} earliest_dl;
 
-	int			overloaded;
+	bool			overloaded;
 
 	/*
 	 * Tasks on this rq that can be pushed away. They are kept in
@@ -850,10 +850,6 @@ struct perf_domain {
 	struct rcu_head rcu;
 };
 
-/* Scheduling group status flags */
-#define SG_OVERLOADED		0x1 /* More than one runnable task on a CPU. */
-#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */
-
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -874,10 +870,10 @@ struct root_domain {
 	 * - More than one runnable task
	 * - Running task is misfit
 	 */
-	int			overloaded;
+	bool			overloaded;
 
 	/* Indicate one or more cpus over-utilized (tipping point) */
-	int			overutilized;
+	bool			overutilized;
 
 	/*
 	 * The bit corresponding to a CPU gets set here if such CPU has more
@@ -2540,9 +2536,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	}
 
 #ifdef CONFIG_SMP
-	if (prev_nr < 2 && rq->nr_running >= 2) {
-		set_rd_overloaded(rq->rd, SG_OVERLOADED);
-	}
+	if (prev_nr < 2 && rq->nr_running >= 2)
+		set_rd_overloaded(rq->rd, 1);
 #endif
 
 	sched_update_tick_dependency(rq);