More power management updates for v4.8-rc1

Merge tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "A few more fixes and cleanups in the x86-64 low-level hibernation
  code, the PM core, cpufreq (Kconfig and intel_pstate), and the
  operating points framework.

  Specifics:

   - Prevent the low-level assembly hibernate code on x86-64 from
     referring to __PAGE_OFFSET directly as a symbol, which doesn't work
     when the kernel identity mapping base is randomized, in which case
     __PAGE_OFFSET is a variable (Rafael Wysocki).

   - Avoid selecting CPU_FREQ_STAT by default, as the statistics are not
     required for proper cpufreq operation (Borislav Petkov).

   - Add Skylake-X and Broadwell-X IDs to intel_pstate's list of
     processors where out-of-band (OOB) control of P-states is possible;
     if that is in use, intel_pstate should not attempt to manage
     P-states (Srinivas Pandruvada).

   - Drop some unnecessary checks from the wakeup IRQ handling code in
     the PM core (Markus Elfring).

   - Reduce the number of operating performance point (OPP) lookups in
     one of the OPP framework's helper functions (Jisheng Zhang)."

* tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  x86/power/64: Do not refer to __PAGE_OFFSET from assembly code
  cpufreq: Do not default-yes CPU_FREQ_STAT
  cpufreq: intel_pstate: Add more out-of-band IDs
  PM / OPP: optimize dev_pm_opp_set_rate() performance a bit
  PM-wakeup: Delete unnecessary checks before three function calls
commit 11d8ec408d
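The OPP hunks below rely on the kernel's error-pointer convention: a lookup returns either a valid pointer or an errno encoded into the pointer value via ERR_PTR(), and callers branch on IS_ERR()/PTR_ERR(). The following standalone C sketch uses toy stand-in macros (not the kernel's <linux/err.h> implementation) just to show the shape of that convention:

#include <stdio.h>

/* Toy stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers: an
 * errno in [1, 4095] is encoded as a pointer just below the top of the
 * address space, where no real allocation can live. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

static int table[] = { 100, 200, 300 };

/* Return a pointer to the first entry >= want, or an encoded error. */
static int *find_ceil(int want)
{
	int n = sizeof(table) / sizeof(table[0]);

	for (int i = 0; i < n; i++)
		if (table[i] >= want)
			return &table[i];
	return ERR_PTR(-34);	/* like ERR_PTR(-ERANGE) in the OPP code */
}

int main(void)
{
	int *entry = find_ceil(250);

	if (IS_ERR(entry))
		printf("lookup failed: %ld\n", PTR_ERR(entry));
	else
		printf("found %d\n", *entry);
	return 0;
}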
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -37,11 +37,11 @@ unsigned long jump_address_phys;
  */
 unsigned long restore_cr3 __visible;
 
-pgd_t *temp_level4_pgt __visible;
+unsigned long temp_level4_pgt __visible;
 
 unsigned long relocated_restore_code __visible;
 
-static int set_up_temporary_text_mapping(void)
+static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
 	pmd_t *pmd;
 	pud_t *pud;
@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void)
 		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
 	set_pud(pud + pud_index(restore_jump_address),
 		__pud(__pa(pmd) | _KERNPG_TABLE));
-	set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+	set_pgd(pgd + pgd_index(restore_jump_address),
 		__pgd(__pa(pud) | _KERNPG_TABLE));
 
 	return 0;
@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void)
 		.kernel_mapping = true,
 	};
 	unsigned long mstart, mend;
+	pgd_t *pgd;
 	int result;
 	int i;
 
-	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!temp_level4_pgt)
+	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pgd)
 		return -ENOMEM;
 
 	/* Prepare a temporary mapping for the kernel text */
-	result = set_up_temporary_text_mapping();
+	result = set_up_temporary_text_mapping(pgd);
 	if (result)
 		return result;
 
@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void)
 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
 		mend = pfn_mapped[i].end << PAGE_SHIFT;
 
-		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
-						   mstart, mend);
-
+		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 		if (result)
 			return result;
 	}
 
+	temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
 	return 0;
 }
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -72,8 +72,6 @@ ENTRY(restore_image)
 	/* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
 	/* switch to temporary page tables */
-	movq	$__PAGE_OFFSET, %rcx
-	subq	%rcx, %rax
 	movq	%rax, %cr3
 	/* flush TLB */
 	movq	%rbx, %rcx
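The two hunks above remove the $__PAGE_OFFSET reference from the assembly by having the C side store temp_level4_pgt as a physical address (the new top-level page table's direct-map virtual address minus the direct-mapping base), so the restore code can load the value into CR3 unchanged. A toy userspace sketch of that arithmetic, with made-up values standing in for a randomized direct-mapping base:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: with base-address randomization the start of the
 * kernel's direct mapping is a runtime value, so it cannot be baked into
 * assembly as a constant; the conversion has to happen in C. */
static uint64_t page_offset;	/* "randomized" at startup in this toy model */

/* Same arithmetic as the kernel's __pa() for direct-map addresses. */
static uint64_t virt_to_phys(uint64_t virt)
{
	return virt - page_offset;
}

int main(void)
{
	/* Pretend the randomizer picked this direct-mapping base. */
	page_offset = 0xffff880000000000ULL + (0x42ULL << 30);

	uint64_t pgd_virt = page_offset + 0x1000;	/* a "direct-mapped" page */

	printf("page table physical address = 0x%llx\n",
	       (unsigned long long)virt_to_phys(pgd_virt));
	return 0;
}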
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
+static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
+						   unsigned long *freq)
+{
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+		if (temp_opp->available && temp_opp->rate >= *freq) {
+			opp = temp_opp;
+			*freq = opp->rate;
+			break;
+		}
+	}
+
+	return opp;
+}
+
 /**
  * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  * @dev:	device for which we do this operation
@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 					     unsigned long *freq)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	opp_rcu_lockdep_assert();
 
@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 	if (IS_ERR(opp_table))
 		return ERR_CAST(opp_table);
 
-	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->rate >= *freq) {
-			opp = temp_opp;
-			*freq = opp->rate;
-			break;
-		}
-	}
-
-	return opp;
+	return _find_freq_ceil(opp_table, freq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 
@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 		return PTR_ERR(opp_table);
 	}
 
-	old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+	old_opp = _find_freq_ceil(opp_table, &old_freq);
 	if (!IS_ERR(old_opp)) {
 		ou_volt = old_opp->u_volt;
 		ou_volt_min = old_opp->u_volt_min;
@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 			__func__, old_freq, PTR_ERR(old_opp));
 	}
 
-	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+	opp = _find_freq_ceil(opp_table, &freq);
 	if (IS_ERR(opp)) {
 		ret = PTR_ERR(opp);
 		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
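The OPP hunks above hoist the ceil-search loop into _find_freq_ceil(), which operates on an already-resolved opp_table; dev_pm_opp_set_rate() then reuses the table it is already holding instead of calling dev_pm_opp_find_freq_ceil() and repeating the device-to-table resolution twice per rate change. A rough standalone sketch of that refactoring pattern (hypothetical names, not the kernel code):

#include <stdio.h>
#include <stddef.h>

struct opp { unsigned long rate; int available; };
struct opp_table { struct opp opps[4]; size_t count; };

/* Hypothetical: stands in for the expensive device -> table resolution. */
static struct opp_table *find_table_for_device(const char *dev)
{
	static struct opp_table t = {
		.opps = { {500, 1}, {800, 0}, {1000, 1}, {1500, 1} },
		.count = 4,
	};
	printf("resolving table for %s (the step worth doing only once)\n", dev);
	return &t;
}

/* Helper that works on an already-resolved table, like _find_freq_ceil(). */
static struct opp *find_freq_ceil(struct opp_table *table, unsigned long *freq)
{
	for (size_t i = 0; i < table->count; i++) {
		if (table->opps[i].available && table->opps[i].rate >= *freq) {
			*freq = table->opps[i].rate;
			return &table->opps[i];
		}
	}
	return NULL;
}

/* Analogue of dev_pm_opp_set_rate(): one table lookup, reused for both searches. */
static int set_rate(const char *dev, unsigned long target)
{
	struct opp_table *table = find_table_for_device(dev);
	unsigned long old = 600, new = target;

	struct opp *old_opp = find_freq_ceil(table, &old);	/* no re-resolution */
	struct opp *new_opp = find_freq_ceil(table, &new);

	if (!new_opp)
		return -1;
	printf("switching from %lu to %lu\n", old_opp ? old : 0, new);
	return 0;
}

int main(void)
{
	return set_rate("cpu0", 900);
}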
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void)
 	struct wakeup_source *ws;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
-		if (ws->wakeirq)
-			dev_pm_arm_wake_irq(ws->wakeirq);
-	}
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_arm_wake_irq(ws->wakeirq);
+
 	rcu_read_unlock();
 }
 
@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void)
 	struct wakeup_source *ws;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
-		if (ws->wakeirq)
-			dev_pm_disarm_wake_irq(ws->wakeirq);
-	}
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_disarm_wake_irq(ws->wakeirq);
+
 	rcu_read_unlock();
 }
 
@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev)
 		return -EINVAL;
 
 	ws = device_wakeup_detach(dev);
-	if (ws)
-		wakeup_source_unregister(ws);
-
+	wakeup_source_unregister(ws);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(device_wakeup_disable);
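The wakeup.c hunks above drop caller-side checks because the helpers being called already tolerate a NULL argument (that was the rationale for the cleanup), the same convention free(NULL) follows. A minimal illustration of the pattern, with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct wake_irq { int irq; };

/* NULL-tolerant teardown helper: callers never need "if (wirq)" first. */
static void disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;		/* the guard lives here, once */
	printf("disarming IRQ %d\n", wirq->irq);
}

int main(void)
{
	struct wake_irq *wirq = malloc(sizeof(*wirq));

	if (wirq)
		wirq->irq = 17;

	disarm_wake_irq(wirq);	/* fine whether or not the allocation succeeded */
	disarm_wake_irq(NULL);	/* also fine: no caller-side check needed */
	free(wirq);		/* free() tolerates NULL the same way */
	return 0;
}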
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -32,7 +32,6 @@ config CPU_FREQ_BOOST_SW
 
 config CPU_FREQ_STAT
 	bool "CPU frequency transition statistics"
-	default y
 	help
 	  Export CPU frequency statistics information through sysfs.
 
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1374,6 +1374,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
 	{}
 };
 
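intel_pstate_cpu_oob_ids is a sentinel-terminated CPU-ID match table: if the running processor matches an entry, the driver backs off, since firmware may be managing P-states out of band on those parts. A toy version of the sentinel-terminated table pattern (invented names; the model numbers are only illustrative, not taken from the kernel headers):

#include <stdio.h>

struct cpu_id { int family; int model; };

/* Table terminated by an all-zero sentinel, like the kernel's x86_cpu_id tables. */
static const struct cpu_id oob_ids[] = {
	{ 6, 0x56 },	/* a Broadwell-DE-like model (illustrative) */
	{ 6, 0x4f },	/* Broadwell-X-like */
	{ 6, 0x55 },	/* Skylake-X-like */
	{ 0, 0 }	/* sentinel */
};

/* Walk the table until the sentinel; return the matching entry or NULL. */
static const struct cpu_id *match_cpu(const struct cpu_id *table,
				      int family, int model)
{
	for (; table->family || table->model; table++)
		if (table->family == family && table->model == model)
			return table;
	return NULL;
}

int main(void)
{
	if (match_cpu(oob_ids, 6, 0x55))
		printf("P-states managed out of band; driver should not load\n");
	else
		printf("no OOB match; driver can manage P-states\n");
	return 0;
}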