sched/core: Introduce sched_asym_cpucap_active()
Create an inline helper for conditional code to be only executed on
asymmetric CPU capacity systems. This makes these (currently ~10 and
future) conditions a lot more readable.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20220729111305.1275158-2-dietmar.eggemann@arm.com
commit 740cf8a760
parent 9de1f9c8ca
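For readers outside the kernel tree: the change swaps every open-coded
static_branch_unlikely(&sched_asym_cpucapacity) test for one named
predicate. Below is a minimal userspace sketch of that pattern, not
scheduler code: the plain bool stands in for the kernel's jump-label
static key, and pick_cpu_for() is a hypothetical call site invented
for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's static key. In the kernel this
 * is "extern struct static_key_false sched_asym_cpucapacity;" and the
 * test below is static_branch_unlikely(&sched_asym_cpucapacity).
 */
static bool sched_asym_cpucapacity;

/*
 * The helper introduced by this commit: one named predicate instead
 * of ~10 open-coded static-key tests scattered across the scheduler.
 */
static inline bool sched_asym_cpucap_active(void)
{
	return sched_asym_cpucapacity;
}

/* Hypothetical call site: reads as a question about the topology. */
static int pick_cpu_for(unsigned long task_util)
{
	if (!sched_asym_cpucap_active())
		return 0;	/* symmetric system: any CPU fits */

	/* asymmetric-capacity-only placement logic would go here */
	printf("considering per-CPU capacity for util %lu\n", task_util);
	return 1;
}

int main(void)
{
	pick_cpu_for(256);		/* key disabled: fast path     */
	sched_asym_cpucapacity = true;	/* kernel: static_branch_enable */
	pick_cpu_for(256);		/* key enabled: capacity-aware  */
	return 0;
}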
kernel/sched/cpudeadline.c
@@ -123,7 +123,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 		unsigned long cap, max_cap = 0;
 		int cpu, max_cpu = -1;
 
-		if (!static_branch_unlikely(&sched_asym_cpucapacity))
+		if (!sched_asym_cpucap_active())
 			return 1;
 
 		/* Ensure the capacity of the CPUs fits the task. */
kernel/sched/deadline.c
@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(int i)
  */
 static inline unsigned long dl_bw_capacity(int i)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
+	if (!sched_asym_cpucap_active() &&
 	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 	} else {
@@ -1849,7 +1849,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 		 * Take the capacity of the CPU into account to
 		 * ensure it fits the requirement of the task.
 		 */
-		if (static_branch_unlikely(&sched_asym_cpucapacity))
+		if (sched_asym_cpucap_active())
 			select_rq |= !dl_task_fits_capacity(p, cpu);
 
 		if (select_rq) {
kernel/sched/fair.c
@@ -4262,7 +4262,7 @@ static inline int task_fits_capacity(struct task_struct *p,
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return;
 
 	if (!p || p->nr_cpus_allowed == 1) {
@@ -6506,7 +6506,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 
 static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 {
-	if (static_branch_unlikely(&sched_asym_cpucapacity))
+	if (sched_asym_cpucap_active())
 		return fits_capacity(task_util, capacity_of(cpu));
 
 	return true;
@@ -6526,7 +6526,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * On asymmetric system, update task utilization because we will check
 	 * that the task fits with cpu's capacity.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sync_entity_load_avg(&p->se);
 		task_util = uclamp_task_util(p);
 	}
@@ -6580,7 +6580,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * For asymmetric CPU capacity systems, our domain of interest is
 	 * sd_asym_cpucapacity rather than sd_llc.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
 		/*
 		 * On an asymmetric CPU capacity system where an exclusive
kernel/sched/rt.c
@@ -509,7 +509,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 	unsigned int cpu_cap;
 
 	/* Only heterogeneous systems can benefit from this check */
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return true;
 
 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
@@ -1897,7 +1897,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * If we're on asym system ensure we consider the different capacities
 	 * of the CPUs when searching for the lowest_mask.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 
 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
 					  task, lowest_mask,
kernel/sched/sched.h
@@ -1813,6 +1813,11 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;
 
+static __always_inline bool sched_asym_cpucap_active(void)
+{
+	return static_branch_unlikely(&sched_asym_cpucapacity);
+}
+
 struct sched_group_capacity {
 	atomic_t ref;
 	/*
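A note on the definition above: because the helper is declared
static __always_inline, every call site still compiles down to the
jump-label test itself. With the static_key_false key disabled (the
common, symmetric-capacity case), static_branch_unlikely() becomes a
patchable no-op that falls straight past the asymmetric-only code;
the kernel patches it into a jump only once asymmetric CPU capacities
are detected. The rename therefore costs nothing at runtime while
letting the ~10 call sites read as a plain predicate on the topology.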