Merge tag 'sched-urgent-2021-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
 "A single fix for the x86 scheduler topology:

  Using cluster topology on hybrid CPUs, e.g. Alder Lake, biases the
  scheduler towards the ATOM cluster as that has more total capacity.
  Use selection based on CPU priority instead"

* tag 'sched-urgent-2021-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched,x86: Don't use cluster topology for x86 hybrid CPUs
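
For illustration only (not part of the commit): a stand-alone, user-space C sketch of the bias described in the message above. A cluster of small cores can have more *total* capacity than a single big core, so a "largest total capacity" rule prefers the small-core cluster, while per-CPU priority selection (the behaviour the fix falls back to, in the spirit of arch_asym_cpu_priority()) prefers the big core. The struct layout and the capacity/priority numbers below are made up for the example and do not come from the kernel.

/*
 * Toy model of the placement bias: compare "pick the cluster with the
 * largest total capacity" against "pick the CPU with the highest priority".
 * All numbers are illustrative, not real Alder Lake values.
 */
#include <stdio.h>

struct cpu {
        const char *name;
        int capacity;   /* per-CPU compute capacity */
        int priority;   /* higher = preferred, roughly like ITMT priority */
};

struct cluster {
        const char *name;
        const struct cpu *cpus;
        int nr_cpus;
};

static const struct cpu big_cpus[]  = { { "big0",  1024, 2 } };
static const struct cpu atom_cpus[] = {
        { "atom0", 460, 1 }, { "atom1", 460, 1 },
        { "atom2", 460, 1 }, { "atom3", 460, 1 },
};

static const struct cluster clusters[] = {
        { "big-cluster",  big_cpus,  1 },
        { "atom-cluster", atom_cpus, 4 },
};

/* rule 1: prefer the cluster with the largest total capacity */
static const struct cluster *pick_by_total_capacity(void)
{
        const struct cluster *best = NULL;
        int best_cap = -1;

        for (int i = 0; i < 2; i++) {
                int cap = 0;

                for (int j = 0; j < clusters[i].nr_cpus; j++)
                        cap += clusters[i].cpus[j].capacity;
                if (cap > best_cap) {
                        best_cap = cap;
                        best = &clusters[i];
                }
        }
        return best;
}

/* rule 2: prefer the individual CPU with the highest priority */
static const struct cpu *pick_by_priority(void)
{
        const struct cpu *best = NULL;

        for (int i = 0; i < 2; i++)
                for (int j = 0; j < clusters[i].nr_cpus; j++)
                        if (!best || clusters[i].cpus[j].priority > best->priority)
                                best = &clusters[i].cpus[j];
        return best;
}

int main(void)
{
        /* the capacity rule lands on the Atom cluster, the priority rule on the big core */
        printf("total-capacity rule picks: %s\n", pick_by_total_capacity()->name);
        printf("priority rule picks:       %s\n", pick_by_priority()->name);
        return 0;
}
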
Linus Torvalds 2021-12-12 09:38:04 -08:00
commit 773602256a

@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
 	{ NULL, },
 };
 
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 static struct sched_domain_topology_level x86_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 
 	calculate_max_logical_packages();
 
+	/* XXX for now assume numa-in-package and hybrid don't overlap */
 	if (x86_has_numa_in_package)
 		set_sched_topology(x86_numa_in_package_topology);
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+		set_sched_topology(x86_hybrid_topology);
 
 	nmi_selftest();
 	impress_friends();