mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
More power management updates for v4.11-rc1
Merge tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These fix two bugs introduced by recent power management updates (in
  the cpuidle menu governor and intel_pstate) and a few other issues,
  clean up things and remove unused code.

  Specifics:

   - Fix for a cpuidle menu governor problem that started to take an
     unnecessary spinlock after one of the recent updates and that did
     not play well with the RT patch (Rafael Wysocki).

   - Fix for the new intel_pstate operation mode switching feature added
     recently that did not reinitialize P-state limits properly when
     switching operation modes (Rafael Wysocki).

   - Removal of unused global notifiers from the PM QoS framework
     (Viresh Kumar).

   - Generic power domains framework update to make it handle
     asynchronous invocations of PM callbacks in the "noirq" phases of
     system suspend/hibernation correctly (Ulf Hansson).

   - Two hibernation core cleanups (Rafael Wysocki).

   - intel_idle cleanup related to the sysfs interface (Len Brown).

   - Off-by-one bug fix in the OPP (Operating Performance Points)
     framework (Andrzej Hajda).

   - OPP framework's documentation fix (Viresh Kumar).

   - cpufreq qoriq driver cleanup (Tang Yuantian).

   - Fixes for typos in comments in the device runtime PM framework
     (Christophe Jaillet)"

* tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  intel_idle: stop exposing platform acronyms in sysfs
  cpufreq: intel_pstate: Fix limits issue with operation mode switching
  PM / hibernate: Define pr_fmt() and use pr_*() instead of printk()
  PM / hibernate: Untangle power_down()
  cpuidle: menu: Avoid taking spinlock for accessing QoS values
  PM / QoS: Remove global notifiers
  PM / runtime: Fix some typos
  cpufreq: qoriq: clean up unused code
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()
commit 080e4168c0
Documentation/devicetree/bindings/opp/opp.txt:

@@ -188,14 +188,14 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt = <970000 975000 985000>;
+		opp-microvolt = <975000 970000 985000>;
 		opp-microamp = <70000>;
 		clock-latency-ns = <300000>;
 		opp-suspend;
 	};
 	opp@1100000000 {
 		opp-hz = /bits/ 64 <1100000000>;
-		opp-microvolt = <980000 1000000 1010000>;
+		opp-microvolt = <1000000 980000 1010000>;
 		opp-microamp = <80000>;
 		clock-latency-ns = <310000>;
 	};

@@ -267,14 +267,14 @@ independently.
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt = <970000 975000 985000>;
+		opp-microvolt = <975000 970000 985000>;
 		opp-microamp = <70000>;
 		clock-latency-ns = <300000>;
 		opp-suspend;
 	};
 	opp@1100000000 {
 		opp-hz = /bits/ 64 <1100000000>;
-		opp-microvolt = <980000 1000000 1010000>;
+		opp-microvolt = <1000000 980000 1010000>;
 		opp-microamp = <80000>;
 		clock-latency-ns = <310000>;
 	};

@@ -343,14 +343,14 @@ DVFS state together.
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt = <970000 975000 985000>;
+		opp-microvolt = <975000 970000 985000>;
 		opp-microamp = <70000>;
 		clock-latency-ns = <300000>;
 		opp-suspend;
 	};
 	opp@1100000000 {
 		opp-hz = /bits/ 64 <1100000000>;
-		opp-microvolt = <980000 1000000 1010000>;
+		opp-microvolt = <1000000 980000 1010000>;
 		opp-microamp = <80000>;
 		clock-latency-ns = <310000>;
 	};

@@ -369,7 +369,7 @@ DVFS state together.
 	opp@1300000000 {
 		opp-hz = /bits/ 64 <1300000000>;
-		opp-microvolt = <1045000 1050000 1055000>;
+		opp-microvolt = <1050000 1045000 1055000>;
 		opp-microamp = <95000>;
 		clock-latency-ns = <400000>;
 		opp-suspend;

@@ -382,7 +382,7 @@ DVFS state together.
 	};
 	opp@1500000000 {
 		opp-hz = /bits/ 64 <1500000000>;
-		opp-microvolt = <1010000 1100000 1110000>;
+		opp-microvolt = <1100000 1010000 1110000>;
 		opp-microamp = <95000>;
 		clock-latency-ns = <400000>;
 		turbo-mode;

@@ -424,9 +424,9 @@ Example 4: Handling multiple regulators
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-				<960000 965000 975000>, /* Supply 1 */
-				<960000 965000 975000>; /* Supply 2 */
+		opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+				<965000 960000 975000>, /* Supply 1 */
+				<965000 960000 975000>; /* Supply 2 */
 		opp-microamp = <70000>, /* Supply 0 */
 			       <70000>, /* Supply 1 */
 			       <70000>; /* Supply 2 */

@@ -437,9 +437,9 @@ Example 4: Handling multiple regulators
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-				<960000 965000 975000>, /* Supply 1 */
-				<960000 965000 975000>; /* Supply 2 */
+		opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+				<965000 960000 975000>, /* Supply 1 */
+				<965000 960000 975000>; /* Supply 2 */
 		opp-microamp = <70000>, /* Supply 0 */
 			       <0>, /* Supply 1 doesn't need this */
 			       <70000>; /* Supply 2 */

@@ -474,7 +474,7 @@ Example 5: opp-supported-hw
 		 */
 		opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
 		opp-hz = /bits/ 64 <600000000>;
-		opp-microvolt = <900000 915000 925000>;
+		opp-microvolt = <915000 900000 925000>;
 		...
 	};

@@ -487,7 +487,7 @@ Example 5: opp-supported-hw
 		 */
 		opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
 		opp-hz = /bits/ 64 <800000000>;
-		opp-microvolt = <900000 915000 925000>;
+		opp-microvolt = <915000 900000 925000>;
 		...
 	};
 };

@@ -512,18 +512,18 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>:
 	opp@1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
-		opp-microvolt-slow = <900000 915000 925000>;
-		opp-microvolt-fast = <970000 975000 985000>;
+		opp-microvolt-slow = <915000 900000 925000>;
+		opp-microvolt-fast = <975000 970000 985000>;
 		opp-microamp-slow = <70000>;
 		opp-microamp-fast = <71000>;
 	};

 	opp@1200000000 {
 		opp-hz = /bits/ 64 <1200000000>;
-		opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */
-				     <910000 925000 935000>; /* Supply vcc1 */
-		opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */
-				     <960000 965000 975000>; /* Supply vcc1 */
+		opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
+				     <925000 910000 935000>; /* Supply vcc1 */
+		opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */
+				     <965000 960000 975000>; /* Supply vcc1 */
 		opp-microamp = <70000>; /* Will be used for both slow/fast */
 	};
 };
Documentation/power/pm_qos_interface.txt:

@@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeup
 under the device's power directory.

 Notification mechanisms:
-The per-device PM QoS framework has 2 different and distinct notification trees:
-a per-device notification tree and a global notification tree.
+The per-device PM QoS framework has a per-device notification tree.

 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.

@@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only).
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.

-int dev_pm_qos_add_global_notifier(notifier):
-Adds a notification callback function in the global notification tree of the
-framework.
-The callback is called when the aggregated value for any device is changed
-(for resume latency device PM QoS only).
-
-int dev_pm_qos_remove_global_notifier(notifier):
-Removes the notification callback function from the global notification tree
-of the framework.
-
 Active state latency tolerance
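With the global notification tree gone, the per-device tree documented above is the only notification mechanism left. A minimal sketch of how a driver might use the surviving calls, assuming the 4.11-era two-argument prototypes shown in the pm_qos.h hunk further down; the foo_* names are placeholders, not kernel interfaces:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

/* Called with the new aggregated resume-latency constraint for the device. */
static int foo_resume_latency_notify(struct notifier_block *nb,
				     unsigned long value, void *data)
{
	pr_info("foo: resume latency constraint is now %lu\n", value);
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_resume_latency_notify,
};

static int foo_probe(struct device *dev)
{
	/* Per-device tree only; dev_pm_qos_add_global_notifier() no longer exists. */
	return dev_pm_qos_add_notifier(dev, &foo_nb);
}

static void foo_remove(struct device *dev)
{
	dev_pm_qos_remove_notifier(dev, &foo_nb);
}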
Documentation/power/runtime_pm.txt:

@@ -100,7 +100,7 @@ knows what to do to handle the device).
 * If the suspend callback returns an error code different from -EBUSY and
 -EAGAIN, the PM core regards this as a fatal error and will refuse to run
 the helper functions described in Section 4 for the device until its status
-is directly set to either'active', or 'suspended' (the PM core provides
+is directly set to either 'active', or 'suspended' (the PM core provides
 special helper functions for this purpose).

 In particular, if the driver requires remote wakeup capability (i.e. hardware

@@ -217,7 +217,7 @@ defined in include/linux/pm.h:
     one to complete

   spinlock_t lock;
-    - lock used for synchronisation
+    - lock used for synchronization

   atomic_t usage_count;
    - the usage counter of the device

@@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the
 probe. This can happen with systems such as the network device layer.

 It may be desirable to suspend the device once ->probe() has finished.
-Therefore the driver core uses the asyncronous pm_request_idle() to submit a
+Therefore the driver core uses the asynchronous pm_request_idle() to submit a
 request to execute the subsystem-level idle callback for the device at that
 time. A driver that makes use of the runtime autosuspend feature, may want to
 update the last busy mark before returning from ->probe().
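The autosuspend remark in the last hunk is easier to see in code. A minimal sketch of the ->probe() pattern it describes, assuming a hypothetical driver and an arbitrary 2-second autosuspend delay (none of this is taken from the patch itself):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
	pm_runtime_set_active(dev);			/* device is powered after setup */
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s, arbitrary */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	/* ... normal hardware initialization ... */

	/*
	 * Refresh the last-busy mark before returning, so the asynchronous
	 * pm_request_idle() submitted by the driver core does not let
	 * autosuspend fire immediately after ->probe().
	 */
	pm_runtime_mark_last_busy(dev);
	return 0;
}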
@ -273,6 +273,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
|
||||
queue_work(pm_wq, &genpd->power_off_work);
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_power_off - Remove power from a given PM domain.
|
||||
* @genpd: PM domain to power down.
|
||||
* @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
|
||||
* RPM status of the releated device is in an intermediate state, not yet turned
|
||||
* into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
|
||||
* be RPM_SUSPENDED, while it tries to power off the PM domain.
|
||||
*
|
||||
* If all of the @genpd's devices have been suspended and all of its subdomains
|
||||
* have been powered down, remove power from @genpd.
|
||||
*/
|
||||
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
|
||||
unsigned int depth)
|
||||
{
|
||||
struct pm_domain_data *pdd;
|
||||
struct gpd_link *link;
|
||||
unsigned int not_suspended = 0;
|
||||
|
||||
/*
|
||||
* Do not try to power off the domain in the following situations:
|
||||
* (1) The domain is already in the "power off" state.
|
||||
* (2) System suspend is in progress.
|
||||
*/
|
||||
if (genpd->status == GPD_STATE_POWER_OFF
|
||||
|| genpd->prepared_count > 0)
|
||||
return 0;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
|
||||
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
|
||||
enum pm_qos_flags_status stat;
|
||||
|
||||
stat = dev_pm_qos_flags(pdd->dev,
|
||||
PM_QOS_FLAG_NO_POWER_OFF
|
||||
| PM_QOS_FLAG_REMOTE_WAKEUP);
|
||||
if (stat > PM_QOS_FLAGS_NONE)
|
||||
return -EBUSY;
|
||||
|
||||
/*
|
||||
* Do not allow PM domain to be powered off, when an IRQ safe
|
||||
* device is part of a non-IRQ safe domain.
|
||||
*/
|
||||
if (!pm_runtime_suspended(pdd->dev) ||
|
||||
irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
|
||||
not_suspended++;
|
||||
}
|
||||
|
||||
if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
|
||||
return -EBUSY;
|
||||
|
||||
if (genpd->gov && genpd->gov->power_down_ok) {
|
||||
if (!genpd->gov->power_down_ok(&genpd->domain))
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (genpd->power_off) {
|
||||
int ret;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
|
||||
/*
|
||||
* If sd_count > 0 at this point, one of the subdomains hasn't
|
||||
* managed to call genpd_power_on() for the master yet after
|
||||
* incrementing it. In that case genpd_power_on() will wait
|
||||
* for us to drop the lock, so we can call .power_off() and let
|
||||
* the genpd_power_on() restore power for us (this shouldn't
|
||||
* happen very often).
|
||||
*/
|
||||
ret = _genpd_power_off(genpd, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_power_off(link->master, false, depth + 1);
|
||||
genpd_unlock(link->master);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_power_on - Restore power to a given PM domain and its masters.
|
||||
* @genpd: PM domain to power up.
|
||||
@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
|
||||
&genpd->slave_links,
|
||||
slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
genpd_queue_power_off_work(link->master);
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_power_off(link->master, false, depth + 1);
|
||||
genpd_unlock(link->master);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -367,87 +456,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_power_off - Remove power from a given PM domain.
|
||||
* @genpd: PM domain to power down.
|
||||
* @is_async: PM domain is powered down from a scheduled work
|
||||
*
|
||||
* If all of the @genpd's devices have been suspended and all of its subdomains
|
||||
* have been powered down, remove power from @genpd.
|
||||
*/
|
||||
static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
|
||||
{
|
||||
struct pm_domain_data *pdd;
|
||||
struct gpd_link *link;
|
||||
unsigned int not_suspended = 0;
|
||||
|
||||
/*
|
||||
* Do not try to power off the domain in the following situations:
|
||||
* (1) The domain is already in the "power off" state.
|
||||
* (2) System suspend is in progress.
|
||||
*/
|
||||
if (genpd->status == GPD_STATE_POWER_OFF
|
||||
|| genpd->prepared_count > 0)
|
||||
return 0;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
|
||||
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
|
||||
enum pm_qos_flags_status stat;
|
||||
|
||||
stat = dev_pm_qos_flags(pdd->dev,
|
||||
PM_QOS_FLAG_NO_POWER_OFF
|
||||
| PM_QOS_FLAG_REMOTE_WAKEUP);
|
||||
if (stat > PM_QOS_FLAGS_NONE)
|
||||
return -EBUSY;
|
||||
|
||||
/*
|
||||
* Do not allow PM domain to be powered off, when an IRQ safe
|
||||
* device is part of a non-IRQ safe domain.
|
||||
*/
|
||||
if (!pm_runtime_suspended(pdd->dev) ||
|
||||
irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
|
||||
not_suspended++;
|
||||
}
|
||||
|
||||
if (not_suspended > 1 || (not_suspended == 1 && is_async))
|
||||
return -EBUSY;
|
||||
|
||||
if (genpd->gov && genpd->gov->power_down_ok) {
|
||||
if (!genpd->gov->power_down_ok(&genpd->domain))
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (genpd->power_off) {
|
||||
int ret;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
|
||||
/*
|
||||
* If sd_count > 0 at this point, one of the subdomains hasn't
|
||||
* managed to call genpd_power_on() for the master yet after
|
||||
* incrementing it. In that case genpd_power_on() will wait
|
||||
* for us to drop the lock, so we can call .power_off() and let
|
||||
* the genpd_power_on() restore power for us (this shouldn't
|
||||
* happen very often).
|
||||
*/
|
||||
ret = _genpd_power_off(genpd, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
genpd_queue_power_off_work(link->master);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
|
||||
* @work: Work structure used for scheduling the execution of this function.
|
||||
@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
|
||||
genpd = container_of(work, struct generic_pm_domain, power_off_work);
|
||||
|
||||
genpd_lock(genpd);
|
||||
genpd_power_off(genpd, true);
|
||||
genpd_power_off(genpd, false, 0);
|
||||
genpd_unlock(genpd);
|
||||
}
|
||||
|
||||
@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
|
||||
return 0;
|
||||
|
||||
genpd_lock(genpd);
|
||||
genpd_power_off(genpd, false);
|
||||
genpd_power_off(genpd, true, 0);
|
||||
genpd_unlock(genpd);
|
||||
|
||||
return 0;
|
||||
@ -658,7 +666,7 @@ err_poweroff:
|
||||
if (!pm_runtime_is_irq_safe(dev) ||
|
||||
(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
|
||||
genpd_lock(genpd);
|
||||
genpd_power_off(genpd, 0);
|
||||
genpd_power_off(genpd, true, 0);
|
||||
genpd_unlock(genpd);
|
||||
}
|
||||
|
||||
|
drivers/base/power/opp/core.c:

@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	for (i = 0; reg = regulators[i], i < count; i++) {
+	for (i = 0; i < count; i++) {
+		reg = regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
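The defect removed here sits in the loop condition: the comma operator evaluates reg = regulators[i] before i < count is tested, so regulators[count] is read once on the final, failing test. A tiny stand-alone illustration of the corrected pattern, in plain userspace C rather than kernel code:

#include <stdio.h>

int main(void)
{
	int regulators[3] = { 10, 20, 30 };
	int count = 3, i, reg;

	/*
	 * The removed condition, "reg = regulators[i], i < count", would
	 * load regulators[3] before the i < count test fails -- one read
	 * past the end of the array.  Indexing only after the bounds check
	 * has passed, as below (and as the fixed kernel loop does), avoids
	 * that.
	 */
	for (i = 0; i < count; i++) {
		reg = regulators[i];
		printf("regulator %d -> %d\n", i, reg);
	}
	return 0;
}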
@ -17,12 +17,9 @@
|
||||
*
|
||||
* This QoS design is best effort based. Dependents register their QoS needs.
|
||||
* Watchers register to keep track of the current QoS needs of the system.
|
||||
* Watchers can register different types of notification callbacks:
|
||||
* . a per-device notification callback using the dev_pm_qos_*_notifier API.
|
||||
* The notification chain data is stored in the per-device constraint
|
||||
* data struct.
|
||||
* . a system-wide notification callback using the dev_pm_qos_*_global_notifier
|
||||
* API. The notification chain data is stored in a static variable.
|
||||
* Watchers can register a per-device notification callback using the
|
||||
* dev_pm_qos_*_notifier API. The notification chain data is stored in the
|
||||
* per-device constraint data struct.
|
||||
*
|
||||
* Note about the per-device constraint data struct allocation:
|
||||
* . The per-device constraints data struct ptr is tored into the device
|
||||
@ -49,8 +46,6 @@
|
||||
static DEFINE_MUTEX(dev_pm_qos_mtx);
|
||||
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
|
||||
|
||||
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
|
||||
|
||||
/**
|
||||
* __dev_pm_qos_flags - Check PM QoS flags for a given device.
|
||||
* @dev: Device to check the PM QoS flags for.
|
||||
@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
|
||||
{
|
||||
lockdep_assert_held(&dev->power.lock);
|
||||
|
||||
return IS_ERR_OR_NULL(dev->power.qos) ?
|
||||
0 : pm_qos_read_value(&dev->power.qos->resume_latency);
|
||||
return dev_pm_qos_raw_read_value(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
|
||||
* @value: Value to assign to the QoS request.
|
||||
*
|
||||
* Internal function to update the constraints list using the PM QoS core
|
||||
* code and if needed call the per-device and the global notification
|
||||
* callbacks
|
||||
* code and if needed call the per-device callbacks.
|
||||
*/
|
||||
static int apply_constraint(struct dev_pm_qos_request *req,
|
||||
enum pm_qos_req_action action, s32 value)
|
||||
@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
|
||||
case DEV_PM_QOS_RESUME_LATENCY:
|
||||
ret = pm_qos_update_target(&qos->resume_latency,
|
||||
&req->data.pnode, action, value);
|
||||
if (ret) {
|
||||
value = pm_qos_read_value(&qos->resume_latency);
|
||||
blocking_notifier_call_chain(&dev_pm_notifiers,
|
||||
(unsigned long)value,
|
||||
req);
|
||||
}
|
||||
break;
|
||||
case DEV_PM_QOS_LATENCY_TOLERANCE:
|
||||
ret = pm_qos_update_target(&qos->latency_tolerance,
|
||||
@ -535,36 +522,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_add_global_notifier - sets notification entry for changes to
|
||||
* target value of the PM QoS constraints for any device
|
||||
*
|
||||
* @notifier: notifier block managed by caller.
|
||||
*
|
||||
* Will register the notifier into a notification chain that gets called
|
||||
* upon changes to the target value for any device.
|
||||
*/
|
||||
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
|
||||
{
|
||||
return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_remove_global_notifier - deletes notification for changes to
|
||||
* target value of PM QoS constraints for any device
|
||||
*
|
||||
* @notifier: notifier block to be removed.
|
||||
*
|
||||
* Will remove the notifier from the notification chain that gets called
|
||||
* upon changes to the target value for any device.
|
||||
*/
|
||||
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
|
||||
{
|
||||
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
|
||||
* @dev: Device whose ancestor to add the request for.
|
||||
|
drivers/cpufreq/intel_pstate.c:

@@ -364,37 +364,25 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif

-static struct perf_limits performance_limits = {
-	.no_turbo = 0,
-	.turbo_disabled = 0,
-	.max_perf_pct = 100,
-	.max_perf = int_ext_tofp(1),
-	.min_perf_pct = 100,
-	.min_perf = int_ext_tofp(1),
-	.max_policy_pct = 100,
-	.max_sysfs_pct = 100,
-	.min_policy_pct = 0,
-	.min_sysfs_pct = 0,
-};
+static struct perf_limits performance_limits;
+static struct perf_limits powersave_limits;
+static struct perf_limits *limits;

-static struct perf_limits powersave_limits = {
-	.no_turbo = 0,
-	.turbo_disabled = 0,
-	.max_perf_pct = 100,
-	.max_perf = int_ext_tofp(1),
-	.min_perf_pct = 0,
-	.min_perf = 0,
-	.max_policy_pct = 100,
-	.max_sysfs_pct = 100,
-	.min_policy_pct = 0,
-	.min_sysfs_pct = 0,
-};
+static void intel_pstate_init_limits(struct perf_limits *limits)
+{
+	memset(limits, 0, sizeof(*limits));
+	limits->max_perf_pct = 100;
+	limits->max_perf = int_ext_tofp(1);
+	limits->max_policy_pct = 100;
+	limits->max_sysfs_pct = 100;
+}

-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-static struct perf_limits *limits = &performance_limits;
-#else
-static struct perf_limits *limits = &powersave_limits;
-#endif
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+	intel_pstate_init_limits(limits);
+	limits->min_perf_pct = 100;
+	limits->min_perf = int_ext_tofp(1);
+}

 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);

@@ -2084,20 +2072,6 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 	synchronize_sched();
 }

-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-	limits->no_turbo = 0;
-	limits->turbo_disabled = 0;
-	limits->max_perf_pct = 100;
-	limits->max_perf = int_ext_tofp(1);
-	limits->min_perf_pct = 100;
-	limits->min_perf = int_ext_tofp(1);
-	limits->max_policy_pct = 100;
-	limits->max_sysfs_pct = 100;
-	limits->min_policy_pct = 0;
-	limits->min_sysfs_pct = 0;
-}
-
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 					    struct perf_limits *limits)
 {

@@ -2466,6 +2440,11 @@ static int intel_pstate_register_driver(void)
 {
 	int ret;

+	intel_pstate_init_limits(&powersave_limits);
+	intel_pstate_set_performance_limits(&performance_limits);
+	limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
+			&performance_limits : &powersave_limits;
+
 	ret = cpufreq_register_driver(intel_pstate_driver);
 	if (ret) {
 		intel_pstate_driver_cleanup();
drivers/cpufreq/qoriq-cpufreq.c:

@@ -23,10 +23,6 @@
 #include <linux/slab.h>
 #include <linux/smp.h>

-#if !defined(CONFIG_ARM)
-#include <asm/smp.h>	/* for get_hard_smp_processor_id() in UP configs */
-#endif
-
 /**
  * struct cpu_data
  * @pclk: the parent clock of cpu
drivers/cpuidle/governors/menu.c:

@@ -287,7 +287,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	unsigned int interactivity_req;
 	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
-	int resume_latency = dev_pm_qos_read_value(device);
+	int resume_latency = dev_pm_qos_raw_read_value(device);

 	if (data->needs_update) {
 		menu_update(drv, dev);
@ -125,7 +125,7 @@ static struct cpuidle_state *cpuidle_state_table;
|
||||
*/
|
||||
static struct cpuidle_state nehalem_cstates[] = {
|
||||
{
|
||||
.name = "C1-NHM",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 3,
|
||||
@ -133,7 +133,7 @@ static struct cpuidle_state nehalem_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-NHM",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -141,7 +141,7 @@ static struct cpuidle_state nehalem_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-NHM",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 20,
|
||||
@ -149,7 +149,7 @@ static struct cpuidle_state nehalem_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-NHM",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 200,
|
||||
@ -162,7 +162,7 @@ static struct cpuidle_state nehalem_cstates[] = {
|
||||
|
||||
static struct cpuidle_state snb_cstates[] = {
|
||||
{
|
||||
.name = "C1-SNB",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -170,7 +170,7 @@ static struct cpuidle_state snb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-SNB",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -178,7 +178,7 @@ static struct cpuidle_state snb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-SNB",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 80,
|
||||
@ -186,7 +186,7 @@ static struct cpuidle_state snb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-SNB",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 104,
|
||||
@ -194,7 +194,7 @@ static struct cpuidle_state snb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7-SNB",
|
||||
.name = "C7",
|
||||
.desc = "MWAIT 0x30",
|
||||
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 109,
|
||||
@ -207,7 +207,7 @@ static struct cpuidle_state snb_cstates[] = {
|
||||
|
||||
static struct cpuidle_state byt_cstates[] = {
|
||||
{
|
||||
.name = "C1-BYT",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -215,7 +215,7 @@ static struct cpuidle_state byt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6N-BYT",
|
||||
.name = "C6N",
|
||||
.desc = "MWAIT 0x58",
|
||||
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 300,
|
||||
@ -223,7 +223,7 @@ static struct cpuidle_state byt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6S-BYT",
|
||||
.name = "C6S",
|
||||
.desc = "MWAIT 0x52",
|
||||
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 500,
|
||||
@ -231,7 +231,7 @@ static struct cpuidle_state byt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7-BYT",
|
||||
.name = "C7",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 1200,
|
||||
@ -239,7 +239,7 @@ static struct cpuidle_state byt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7S-BYT",
|
||||
.name = "C7S",
|
||||
.desc = "MWAIT 0x64",
|
||||
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 10000,
|
||||
@ -252,7 +252,7 @@ static struct cpuidle_state byt_cstates[] = {
|
||||
|
||||
static struct cpuidle_state cht_cstates[] = {
|
||||
{
|
||||
.name = "C1-CHT",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -260,7 +260,7 @@ static struct cpuidle_state cht_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6N-CHT",
|
||||
.name = "C6N",
|
||||
.desc = "MWAIT 0x58",
|
||||
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 80,
|
||||
@ -268,7 +268,7 @@ static struct cpuidle_state cht_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6S-CHT",
|
||||
.name = "C6S",
|
||||
.desc = "MWAIT 0x52",
|
||||
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 200,
|
||||
@ -276,7 +276,7 @@ static struct cpuidle_state cht_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7-CHT",
|
||||
.name = "C7",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 1200,
|
||||
@ -284,7 +284,7 @@ static struct cpuidle_state cht_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7S-CHT",
|
||||
.name = "C7S",
|
||||
.desc = "MWAIT 0x64",
|
||||
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 10000,
|
||||
@ -297,7 +297,7 @@ static struct cpuidle_state cht_cstates[] = {
|
||||
|
||||
static struct cpuidle_state ivb_cstates[] = {
|
||||
{
|
||||
.name = "C1-IVB",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -305,7 +305,7 @@ static struct cpuidle_state ivb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-IVB",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -313,7 +313,7 @@ static struct cpuidle_state ivb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-IVB",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 59,
|
||||
@ -321,7 +321,7 @@ static struct cpuidle_state ivb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-IVB",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 80,
|
||||
@ -329,7 +329,7 @@ static struct cpuidle_state ivb_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7-IVB",
|
||||
.name = "C7",
|
||||
.desc = "MWAIT 0x30",
|
||||
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 87,
|
||||
@ -342,7 +342,7 @@ static struct cpuidle_state ivb_cstates[] = {
|
||||
|
||||
static struct cpuidle_state ivt_cstates[] = {
|
||||
{
|
||||
.name = "C1-IVT",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -350,7 +350,7 @@ static struct cpuidle_state ivt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-IVT",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -358,7 +358,7 @@ static struct cpuidle_state ivt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-IVT",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 59,
|
||||
@ -366,7 +366,7 @@ static struct cpuidle_state ivt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-IVT",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 82,
|
||||
@ -379,7 +379,7 @@ static struct cpuidle_state ivt_cstates[] = {
|
||||
|
||||
static struct cpuidle_state ivt_cstates_4s[] = {
|
||||
{
|
||||
.name = "C1-IVT-4S",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -387,7 +387,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-IVT-4S",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -395,7 +395,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-IVT-4S",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 59,
|
||||
@ -403,7 +403,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-IVT-4S",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 84,
|
||||
@ -416,7 +416,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
|
||||
|
||||
static struct cpuidle_state ivt_cstates_8s[] = {
|
||||
{
|
||||
.name = "C1-IVT-8S",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -424,7 +424,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-IVT-8S",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -432,7 +432,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-IVT-8S",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 59,
|
||||
@ -440,7 +440,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-IVT-8S",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 88,
|
||||
@ -453,7 +453,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
|
||||
|
||||
static struct cpuidle_state hsw_cstates[] = {
|
||||
{
|
||||
.name = "C1-HSW",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -461,7 +461,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-HSW",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -469,7 +469,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-HSW",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 33,
|
||||
@ -477,7 +477,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-HSW",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 133,
|
||||
@ -485,7 +485,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7s-HSW",
|
||||
.name = "C7s",
|
||||
.desc = "MWAIT 0x32",
|
||||
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 166,
|
||||
@ -493,7 +493,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C8-HSW",
|
||||
.name = "C8",
|
||||
.desc = "MWAIT 0x40",
|
||||
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 300,
|
||||
@ -501,7 +501,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C9-HSW",
|
||||
.name = "C9",
|
||||
.desc = "MWAIT 0x50",
|
||||
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 600,
|
||||
@ -509,7 +509,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C10-HSW",
|
||||
.name = "C10",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 2600,
|
||||
@ -521,7 +521,7 @@ static struct cpuidle_state hsw_cstates[] = {
|
||||
};
|
||||
static struct cpuidle_state bdw_cstates[] = {
|
||||
{
|
||||
.name = "C1-BDW",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -529,7 +529,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-BDW",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -537,7 +537,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-BDW",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 40,
|
||||
@ -545,7 +545,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-BDW",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 133,
|
||||
@ -553,7 +553,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7s-BDW",
|
||||
.name = "C7s",
|
||||
.desc = "MWAIT 0x32",
|
||||
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 166,
|
||||
@ -561,7 +561,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C8-BDW",
|
||||
.name = "C8",
|
||||
.desc = "MWAIT 0x40",
|
||||
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 300,
|
||||
@ -569,7 +569,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C9-BDW",
|
||||
.name = "C9",
|
||||
.desc = "MWAIT 0x50",
|
||||
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 600,
|
||||
@ -577,7 +577,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C10-BDW",
|
||||
.name = "C10",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 2600,
|
||||
@ -590,7 +590,7 @@ static struct cpuidle_state bdw_cstates[] = {
|
||||
|
||||
static struct cpuidle_state skl_cstates[] = {
|
||||
{
|
||||
.name = "C1-SKL",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -598,7 +598,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-SKL",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -606,7 +606,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C3-SKL",
|
||||
.name = "C3",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 70,
|
||||
@ -614,7 +614,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-SKL",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 85,
|
||||
@ -622,7 +622,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7s-SKL",
|
||||
.name = "C7s",
|
||||
.desc = "MWAIT 0x33",
|
||||
.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 124,
|
||||
@ -630,7 +630,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C8-SKL",
|
||||
.name = "C8",
|
||||
.desc = "MWAIT 0x40",
|
||||
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 200,
|
||||
@ -638,7 +638,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C9-SKL",
|
||||
.name = "C9",
|
||||
.desc = "MWAIT 0x50",
|
||||
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 480,
|
||||
@ -646,7 +646,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C10-SKL",
|
||||
.name = "C10",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 890,
|
||||
@ -659,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = {
|
||||
|
||||
static struct cpuidle_state skx_cstates[] = {
|
||||
{
|
||||
.name = "C1-SKX",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -667,7 +667,7 @@ static struct cpuidle_state skx_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-SKX",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -675,7 +675,7 @@ static struct cpuidle_state skx_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-SKX",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 133,
|
||||
@ -688,7 +688,7 @@ static struct cpuidle_state skx_cstates[] = {
|
||||
|
||||
static struct cpuidle_state atom_cstates[] = {
|
||||
{
|
||||
.name = "C1E-ATM",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 10,
|
||||
@ -696,7 +696,7 @@ static struct cpuidle_state atom_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C2-ATM",
|
||||
.name = "C2",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10),
|
||||
.exit_latency = 20,
|
||||
@ -704,7 +704,7 @@ static struct cpuidle_state atom_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C4-ATM",
|
||||
.name = "C4",
|
||||
.desc = "MWAIT 0x30",
|
||||
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 100,
|
||||
@ -712,7 +712,7 @@ static struct cpuidle_state atom_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-ATM",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x52",
|
||||
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 140,
|
||||
@ -724,7 +724,7 @@ static struct cpuidle_state atom_cstates[] = {
|
||||
};
|
||||
static struct cpuidle_state tangier_cstates[] = {
|
||||
{
|
||||
.name = "C1-TNG",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -732,7 +732,7 @@ static struct cpuidle_state tangier_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C4-TNG",
|
||||
.name = "C4",
|
||||
.desc = "MWAIT 0x30",
|
||||
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 100,
|
||||
@ -740,7 +740,7 @@ static struct cpuidle_state tangier_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-TNG",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x52",
|
||||
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 140,
|
||||
@ -748,7 +748,7 @@ static struct cpuidle_state tangier_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7-TNG",
|
||||
.name = "C7",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 1200,
|
||||
@ -756,7 +756,7 @@ static struct cpuidle_state tangier_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C9-TNG",
|
||||
.name = "C9",
|
||||
.desc = "MWAIT 0x64",
|
||||
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 10000,
|
||||
@ -768,7 +768,7 @@ static struct cpuidle_state tangier_cstates[] = {
|
||||
};
|
||||
static struct cpuidle_state avn_cstates[] = {
|
||||
{
|
||||
.name = "C1-AVN",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -776,7 +776,7 @@ static struct cpuidle_state avn_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-AVN",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x51",
|
||||
.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 15,
|
||||
@ -788,7 +788,7 @@ static struct cpuidle_state avn_cstates[] = {
|
||||
};
|
||||
static struct cpuidle_state knl_cstates[] = {
|
||||
{
|
||||
.name = "C1-KNL",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 1,
|
||||
@ -796,7 +796,7 @@ static struct cpuidle_state knl_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze },
|
||||
{
|
||||
.name = "C6-KNL",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x10",
|
||||
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 120,
|
||||
@ -809,7 +809,7 @@ static struct cpuidle_state knl_cstates[] = {
|
||||
|
||||
static struct cpuidle_state bxt_cstates[] = {
|
||||
{
|
||||
.name = "C1-BXT",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -817,7 +817,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-BXT",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -825,7 +825,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-BXT",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 133,
|
||||
@ -833,7 +833,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C7s-BXT",
|
||||
.name = "C7s",
|
||||
.desc = "MWAIT 0x31",
|
||||
.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 155,
|
||||
@ -841,7 +841,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C8-BXT",
|
||||
.name = "C8",
|
||||
.desc = "MWAIT 0x40",
|
||||
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 1000,
|
||||
@ -849,7 +849,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C9-BXT",
|
||||
.name = "C9",
|
||||
.desc = "MWAIT 0x50",
|
||||
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 2000,
|
||||
@ -857,7 +857,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C10-BXT",
|
||||
.name = "C10",
|
||||
.desc = "MWAIT 0x60",
|
||||
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 10000,
|
||||
@ -870,7 +870,7 @@ static struct cpuidle_state bxt_cstates[] = {
|
||||
|
||||
static struct cpuidle_state dnv_cstates[] = {
|
||||
{
|
||||
.name = "C1-DNV",
|
||||
.name = "C1",
|
||||
.desc = "MWAIT 0x00",
|
||||
.flags = MWAIT2flg(0x00),
|
||||
.exit_latency = 2,
|
||||
@ -878,7 +878,7 @@ static struct cpuidle_state dnv_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C1E-DNV",
|
||||
.name = "C1E",
|
||||
.desc = "MWAIT 0x01",
|
||||
.flags = MWAIT2flg(0x01),
|
||||
.exit_latency = 10,
|
||||
@ -886,7 +886,7 @@ static struct cpuidle_state dnv_cstates[] = {
|
||||
.enter = &intel_idle,
|
||||
.enter_freeze = intel_idle_freeze, },
|
||||
{
|
||||
.name = "C6-DNV",
|
||||
.name = "C6",
|
||||
.desc = "MWAIT 0x20",
|
||||
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
||||
.exit_latency = 50,
|
||||
|
include/linux/pm_qos.h:

@@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev,
 			    struct notifier_block *notifier);
 int dev_pm_qos_remove_notifier(struct device *dev,
 			       struct notifier_block *notifier);
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,

@@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
 {
 	return dev->power.qos->flags_req->data.flr.flags;
 }
+
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+{
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+}
 #else
 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
 							   s32 mask)

@@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
 static inline int dev_pm_qos_remove_notifier(struct device *dev,
 					     struct notifier_block *notifier)
 			{ return 0; }
-static inline int dev_pm_qos_add_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
-static inline int dev_pm_qos_remove_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
 static inline void dev_pm_qos_constraints_init(struct device *dev)
 {
 	dev->power.power_state = PMSG_ON;

@@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
 #endif

 #endif
@ -10,6 +10,8 @@
|
||||
* This file is released under the GPLv2.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "PM: " fmt
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscalls.h>
|
||||
@@ -104,7 +106,7 @@ EXPORT_SYMBOL(system_entering_hibernation);
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
-	printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
+	pr_info("hibernation debug: Waiting for 5 seconds.\n");
 	mdelay(5000);
 }
 
@@ -250,10 +252,9 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
 		centisecs = 1;	/* avoid div-by-zero */
 	k = nr_pages * (PAGE_SIZE / 1024);
 	kps = (k * 100) / centisecs;
-	printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
-			msg, k,
-			centisecs / 100, centisecs % 100,
-			kps / 1000, (kps % 1000) / 10);
+	pr_info("%s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
+		msg, k, centisecs / 100, centisecs % 100, kps / 1000,
+		(kps % 1000) / 10);
 }
 
 /**
@@ -271,8 +272,7 @@ static int create_image(int platform_mode)
 
 	error = dpm_suspend_end(PMSG_FREEZE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
-			"aborting hibernation\n");
+		pr_err("Some devices failed to power down, aborting hibernation\n");
 		return error;
 	}
 
@@ -288,8 +288,7 @@ static int create_image(int platform_mode)
 
 	error = syscore_suspend();
 	if (error) {
-		printk(KERN_ERR "PM: Some system devices failed to power down, "
-			"aborting hibernation\n");
+		pr_err("Some system devices failed to power down, aborting hibernation\n");
 		goto Enable_irqs;
 	}
 
@@ -304,8 +303,8 @@ static int create_image(int platform_mode)
 	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
-		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
-			error);
+		pr_err("Error %d creating hibernation image\n", error);
+
 	if (!in_suspend) {
 		events_check_enabled = false;
 		clear_free_pages();
@@ -432,8 +431,7 @@ static int resume_target_kernel(bool platform_mode)
 
 	error = dpm_suspend_end(PMSG_QUIESCE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
-			"aborting resume\n");
+		pr_err("Some devices failed to power down, aborting resume\n");
 		return error;
 	}
 
@@ -608,6 +606,22 @@ static void power_down(void)
 {
 #ifdef CONFIG_SUSPEND
 	int error;
+
+	if (hibernation_mode == HIBERNATION_SUSPEND) {
+		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+		if (error) {
+			hibernation_mode = hibernation_ops ?
+						HIBERNATION_PLATFORM :
+						HIBERNATION_SHUTDOWN;
+		} else {
+			/* Restore swap signature. */
+			error = swsusp_unmark();
+			if (error)
+				pr_err("Swap will be unusable! Try swapon -a.\n");
+
+			return;
+		}
+	}
 #endif
 
 	switch (hibernation_mode) {
@@ -620,32 +634,13 @@ static void power_down(void)
 		if (pm_power_off)
 			kernel_power_off();
 		break;
-#ifdef CONFIG_SUSPEND
-	case HIBERNATION_SUSPEND:
-		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-		if (error) {
-			if (hibernation_ops)
-				hibernation_mode = HIBERNATION_PLATFORM;
-			else
-				hibernation_mode = HIBERNATION_SHUTDOWN;
-			power_down();
-		}
-		/*
-		 * Restore swap signature.
-		 */
-		error = swsusp_unmark();
-		if (error)
-			printk(KERN_ERR "PM: Swap will be unusable! "
-			       "Try swapon -a.\n");
-		return;
-#endif
 	}
 	kernel_halt();
 	/*
 	 * Valid image is on the disk, if we continue we risk serious data
 	 * corruption after resume.
 	 */
-	printk(KERN_CRIT "PM: Please power down manually\n");
+	pr_crit("Power down manually\n");
 	while (1)
 		cpu_relax();
 }
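Taken together, the two power_down() hunks above are the "untangle" cleanup: HIBERNATION_SUSPEND is no longer handled inside the switch, where a failed suspend recursed back into power_down() and then fell through the swap-signature code, but is handled once up front and merely adjusts hibernation_mode before the switch runs. A condensed control-flow sketch, simplified from the hunks above and reusing the file's own symbols:

/* Simplified sketch of the reworked flow; see the hunks above for the real code. */
static void power_down_sketch(void)
{
#ifdef CONFIG_SUSPEND
	if (hibernation_mode == HIBERNATION_SUSPEND) {
		if (suspend_devices_and_enter(PM_SUSPEND_MEM)) {
			/* Suspend failed: pick a fallback mode and fall
			 * through to the switch below, with no recursion. */
			hibernation_mode = hibernation_ops ?
				HIBERNATION_PLATFORM : HIBERNATION_SHUTDOWN;
		} else {
			/* Back from suspend: restore the swap signature
			 * and return instead of powering off. */
			if (swsusp_unmark())
				pr_err("Swap will be unusable! Try swapon -a.\n");
			return;
		}
	}
#endif
	switch (hibernation_mode) {
	default:
		/* Platform, shutdown and reboot handling as before;
		 * HIBERNATION_SUSPEND never reaches this switch any more. */
		break;
	}
}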
@@ -655,7 +650,7 @@ static int load_image_and_restore(void)
 	int error;
 	unsigned int flags;
 
-	pr_debug("PM: Loading hibernation image.\n");
+	pr_debug("Loading hibernation image.\n");
 
 	lock_device_hotplug();
 	error = create_basic_memory_bitmaps();
@@ -667,7 +662,7 @@ static int load_image_and_restore(void)
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
-	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
+	pr_err("Failed to load hibernation image, recovering.\n");
 	swsusp_free();
 	free_basic_memory_bitmaps();
  Unlock:
@@ -685,7 +680,7 @@ int hibernate(void)
 	bool snapshot_test = false;
 
 	if (!hibernation_available()) {
-		pr_debug("PM: Hibernation not available.\n");
+		pr_debug("Hibernation not available.\n");
 		return -EPERM;
 	}
 
@@ -703,9 +698,9 @@ int hibernate(void)
 		goto Exit;
 	}
 
-	printk(KERN_INFO "PM: Syncing filesystems ... ");
+	pr_info("Syncing filesystems ... \n");
 	sys_sync();
-	printk("done.\n");
+	pr_info("done.\n");
 
 	error = freeze_processes();
 	if (error)
@@ -731,7 +726,7 @@ int hibernate(void)
 	else
 		flags |= SF_CRC32_MODE;
 
-	pr_debug("PM: writing image.\n");
+	pr_debug("Writing image.\n");
 	error = swsusp_write(flags);
 	swsusp_free();
 	if (!error) {
@@ -743,7 +738,7 @@ int hibernate(void)
 		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
-		pr_debug("PM: Image restored successfully.\n");
+		pr_debug("Image restored successfully.\n");
 	}
 
  Free_bitmaps:
@@ -751,7 +746,7 @@ int hibernate(void)
  Thaw:
 	unlock_device_hotplug();
 	if (snapshot_test) {
-		pr_debug("PM: Checking hibernation image\n");
+		pr_debug("Checking hibernation image\n");
 		error = swsusp_check();
 		if (!error)
 			error = load_image_and_restore();
@@ -815,10 +810,10 @@ static int software_resume(void)
 		goto Unlock;
 	}
 
-	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
+	pr_debug("Checking hibernation image partition %s\n", resume_file);
 
 	if (resume_delay) {
-		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
+		pr_info("Waiting %dsec before reading resume device ...\n",
 			resume_delay);
 		ssleep(resume_delay);
 	}
@@ -857,10 +852,10 @@ static int software_resume(void)
 	}
 
  Check_image:
-	pr_debug("PM: Hibernation image partition %d:%d present\n",
+	pr_debug("Hibernation image partition %d:%d present\n",
 		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
-	pr_debug("PM: Looking for hibernation image.\n");
+	pr_debug("Looking for hibernation image.\n");
 	error = swsusp_check();
 	if (error)
 		goto Unlock;
@@ -879,7 +874,7 @@ static int software_resume(void)
 		goto Close_Finish;
 	}
 
-	pr_debug("PM: Preparing processes for restore.\n");
+	pr_debug("Preparing processes for restore.\n");
 	error = freeze_processes();
 	if (error)
 		goto Close_Finish;
@@ -892,7 +887,7 @@ static int software_resume(void)
 	/* For success case, the suspend path will release the lock */
  Unlock:
 	mutex_unlock(&pm_mutex);
-	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
+	pr_debug("Hibernation image not present or could not be loaded.\n");
 	return error;
  Close_Finish:
 	swsusp_close(FMODE_READ);
@@ -1016,7 +1011,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 		error = -EINVAL;
 
 	if (!error)
-		pr_debug("PM: Hibernation mode set to '%s'\n",
+		pr_debug("Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
 	unlock_system_sleep();
 	return error ? error : n;
@@ -1052,7 +1047,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	lock_system_sleep();
 	swsusp_resume_device = res;
 	unlock_system_sleep();
-	printk(KERN_INFO "PM: Starting manual resume from disk\n");
+	pr_info("Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
 	return n;