sched/topology: Small cleanup

Move the allocation of the topology-specific cpumasks into the topology
code.
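
The net effect, roughly sketched from the diff below (simplified, not the
verbatim resulting source): sched_init_domains() now allocates the working
cpumasks it needs itself, so sched_init_smp()/sched_init() no longer have to:

	static cpumask_var_t fallback_doms;	/* now private to topology.c */

	int sched_init_domains(const struct cpumask *cpu_map)
	{
		int err;

		/* allocated here instead of in sched_init_smp()/sched_init() */
		zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
		zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

		arch_update_cpu_topology();
		...
	}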

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Peter Zijlstra <peterz@infradead.org>
Date:      2017-04-25 15:29:40 +02:00
Committer: Ingo Molnar <mingo@kernel.org>
Commit:    8d5dc5126b
Parent:    73bb059f9b

 3 files changed, 7 insertions(+), 8 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c

@@ -5958,7 +5958,6 @@ void __init sched_init_smp(void)
 	cpumask_var_t non_isolated_cpus;

 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

 	sched_init_numa();

@@ -5968,7 +5967,7 @@ void __init sched_init_smp(void)
 	 * happen.
 	 */
 	mutex_lock(&sched_domains_mutex);
-	init_sched_domains(cpu_active_mask);
+	sched_init_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -6197,7 +6196,6 @@ void __init sched_init(void)
 	calc_load_update = jiffies + LOAD_FREQ;

 #ifdef CONFIG_SMP
-	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 	/* May be allocated at isolcpus cmdline parse time */
 	if (cpu_isolated_map == NULL)
 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -606,11 +606,9 @@ struct root_domain {
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
-extern cpumask_var_t fallback_doms;
-extern cpumask_var_t sched_domains_tmpmask;

 extern void init_defrootdomain(void);
-extern int init_sched_domains(const struct cpumask *cpu_map);
+extern int sched_init_domains(const struct cpumask *cpu_map);

 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);

 #endif /* CONFIG_SMP */

--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c

@@ -1526,7 +1526,7 @@ static struct sched_domain_attr *dattr_cur;
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
-cpumask_var_t fallback_doms;
+static cpumask_var_t fallback_doms;

 /*
  * arch_update_cpu_topology lets virtualized architectures update the
@@ -1568,10 +1568,13 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  * For now this just excludes isolated CPUs, but could be used to
  * exclude other special cases in the future.
  */
-int init_sched_domains(const struct cpumask *cpu_map)
+int sched_init_domains(const struct cpumask *cpu_map)
 {
 	int err;

+	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = alloc_sched_domains(ndoms_cur);