Merge branches 'pm-pci' and 'pm-core'

* pm-pci:
  PCI / PM: Fix small typo in documentation
  PCI / PM: constify pci_platform_pm_ops structure

* pm-core:
  PM / core: fix typo in documentation
  PM / runtime: Add new helper for conditional usage count incrementation
  MAINTAINERS: Add an entry for the PM core
  PM / runtime: Re-init runtime PM states at probe error and driver unbind
  PM / sleep: prohibit devices probing during suspend/hibernation

Rafael J. Wysocki 2016-01-12 01:10:52 +01:00
commit 92266e1de4
13 changed files with 142 additions and 13 deletions

@@ -999,7 +999,7 @@ from its probe routine to make runtime PM work for the device.
It is important to remember that the driver's runtime_suspend() callback
may be executed right after the usage counter has been decremented, because
-user space may already have cuased the pm_runtime_allow() helper function
+user space may already have caused the pm_runtime_allow() helper function
unblocking the runtime PM of the device to run via sysfs, so the driver must
be prepared to cope with that.

@@ -371,6 +371,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
- increment the device's usage counter, run pm_runtime_resume(dev) and
return its result
int pm_runtime_get_if_in_use(struct device *dev);
- return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
runtime PM status is RPM_ACTIVE and the runtime PM usage counter is
nonzero, increment the counter and return 1; otherwise return 0 without
changing the counter
void pm_runtime_put_noidle(struct device *dev);
- decrement the device's usage counter
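
For illustration only (not part of this commit): a minimal sketch of how a driver might use the new helper, assuming a hypothetical foo_poll_hw() callback that should only touch the hardware while runtime PM already holds the device active.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_poll_hw(struct device *dev)
{
        /*
         * Bump the usage counter only if the device is RPM_ACTIVE and
         * already in use: returns 1 on success, 0 if the device is not
         * in use, and -EINVAL if runtime PM is disabled for it.
         */
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return;

        dev_info(dev, "device is active, registers may be accessed\n");

        /* Drop the reference we just took; no resume was requested. */
        pm_runtime_put_noidle(dev);
}

Pairing the conditional get with pm_runtime_put_noidle() is one reasonable choice here, since only the counter was incremented; a driver could equally use pm_runtime_put() if it wants an idle check afterwards.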

@@ -8447,6 +8447,17 @@ F: fs/timerfd.c
F: include/linux/timer*
F: kernel/time/*timer*
POWER MANAGEMENT CORE
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
S: Supported
F: drivers/base/power/
F: include/linux/pm.h
F: include/linux/pm_*
F: include/linux/powercap.h
F: drivers/powercap/
POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
M: Sebastian Reichel <sre@kernel.org>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>

@@ -131,6 +131,8 @@ extern void device_remove_groups(struct device *dev,
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
/* /sys/devices directory */
extern struct kset *devices_kset;

@@ -54,6 +54,13 @@ static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
* Once defer_all_probes is true, all driver probes will be forcibly deferred.
*/
static bool defer_all_probes;
/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
@@ -171,6 +178,30 @@ static void driver_deferred_probe_trigger(void)
queue_work(deferred_wq, &deferred_probe_work);
}
/**
* device_block_probing() - Block/defer device probes
*
* It will disable probing of devices and defer their probes instead.
*/
void device_block_probing(void)
{
defer_all_probes = true;
/* sync with probes to avoid races. */
wait_for_device_probe();
}
/**
* device_unblock_probing() - Unblock/enable device's probes
*
* It will restore normal behavior and trigger re-probing of deferred
* devices.
*/
void device_unblock_probing(void)
{
defer_all_probes = false;
driver_deferred_probe_trigger();
}
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -280,9 +311,20 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
-int ret = 0;
+int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
if (defer_all_probes) {
/*
* Value of defer_all_probes can be set only by
* device_block_probing() which, in turn, will call
* wait_for_device_probe() right after that to avoid any races.
*/
dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
return ret;
}
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -347,6 +389,7 @@ pinctrl_bind_failed:
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
switch (ret) {
case -EPROBE_DEFER:
@@ -400,6 +443,10 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
/* wait for the deferred probe workqueue to finish */
if (driver_deferred_probe_enable)
flush_workqueue(deferred_wq);
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
@@ -702,6 +749,7 @@ static void __device_release_driver(struct device *dev)
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
klist_remove(&dev->p->knode_driver);
if (dev->bus)
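
A condensed sketch (illustrative wrappers only, not part of the patch) of how these internal helpers, declared in drivers/base/base.h above, are meant to be paired by the PM core; the real call sites appear in the dpm_prepare()/dpm_complete() hunk further down.

static void example_prepare_for_suspend(void)
{
        /* Let in-flight probes finish before the cutoff point. */
        wait_for_device_probe();
        /* From here on, really_probe() defers every probe with -EPROBE_DEFER. */
        device_block_probing();
}

static void example_complete_resume(void)
{
        /* Re-enable probing and re-trigger the deferred probe list. */
        device_unblock_probing();
}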

@@ -112,7 +112,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
/**
* dev_pm_domain_detach - Detach a device from its PM domain.
-* @dev: Device to attach.
+* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
*
* This functions will reverse the actions from dev_pm_domain_attach() and thus

@@ -963,6 +963,9 @@ void dpm_complete(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
/* Allow device probing and trigger re-probing of deferred devices */
device_unblock_probing();
trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
@@ -1624,6 +1627,20 @@ int dpm_prepare(pm_message_t state)
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
* disable probing of devices. This sync point is important at least
* at boot time + hibernation restore.
*/
wait_for_device_probe();
/*
* It is unsafe if probing of devices will happen during suspend or
* hibernation and system behavior will be unpredictable in this case.
* So, let's prohibit device's probing here and defer their probes
* instead. The normal behavior will be restored in dpm_complete().
*/
device_block_probing();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
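
Taken together, the block/unblock pair brackets the whole suspend-resume cycle: a probe that races with the transition is forcibly deferred by really_probe() and retried from dpm_complete() once device_unblock_probing() kicks the deferred probe list, so individual drivers should not need any changes of their own for this.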

@@ -18,6 +18,7 @@ static inline void pm_runtime_early_init(struct device *dev)
}
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
struct wake_irq {
@@ -84,6 +85,7 @@ static inline void pm_runtime_early_init(struct device *dev)
}
static inline void pm_runtime_init(struct device *dev) {}
static inline void pm_runtime_reinit(struct device *dev) {}
static inline void pm_runtime_remove(struct device *dev) {}
static inline int dpm_sysfs_add(struct device *dev) { return 0; }

@@ -965,6 +965,30 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
* pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
* If that's not the case and if the device's runtime PM status is RPM_ACTIVE
* and the runtime PM usage counter is nonzero, increment the counter and
* return 1. Otherwise return 0 without changing the counter.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
@@ -1389,6 +1413,25 @@ void pm_runtime_init(struct device *dev)
init_waitqueue_head(&dev->power.wait_queue);
}
/**
* pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
* @dev: Device object to re-initialize.
*/
void pm_runtime_reinit(struct device *dev)
{
if (!pm_runtime_enabled(dev)) {
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe) {
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 0;
spin_unlock_irq(&dev->power.lock);
if (dev->parent)
pm_runtime_put(dev->parent);
}
}
}
/**
* pm_runtime_remove - Prepare for removing a device from device hierarchy.
* @dev: Device object being removed from device hierarchy.
@@ -1396,12 +1439,7 @@ void pm_runtime_init(struct device *dev)
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
-/* Change the status back to 'suspended' to match the initial status. */
-if (dev->power.runtime_status == RPM_ACTIVE)
-pm_runtime_set_suspended(dev);
-if (dev->power.irq_safe && dev->parent)
-pm_runtime_put(dev->parent);
+pm_runtime_reinit(dev);
}
/**
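
To make the purpose of pm_runtime_reinit() concrete, here is a hypothetical probe (not from this patch) that marks its device active for runtime PM and then fails; without the re-initialization now done on probe error and driver unbind, the device would be left with a stale RPM_ACTIVE status.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_set_active(&pdev->dev);      /* status becomes RPM_ACTIVE */
        pm_runtime_enable(&pdev->dev);

        ret = -ENODEV;                          /* pretend hardware init failed */
        if (ret) {
                pm_runtime_disable(&pdev->dev);
                /*
                 * really_probe() now calls pm_runtime_reinit(); since runtime
                 * PM is disabled again at this point, the status is put back
                 * to RPM_SUSPENDED to match the initial, never-probed state.
                 */
                return ret;
        }
        return 0;
}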

@@ -529,7 +529,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
return !!adev->power.flags.dsw_present;
}
-static struct pci_platform_pm_ops acpi_pci_platform_pm = {
+static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,

@@ -527,9 +527,9 @@ static void pci_restore_bars(struct pci_dev *dev)
pci_update_resource(dev, i);
}
-static struct pci_platform_pm_ops *pci_platform_pm;
+static const struct pci_platform_pm_ops *pci_platform_pm;
-int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
+int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
if (!ops->is_manageable || !ops->set_state || !ops->choose_state
|| !ops->sleep_wake)

@@ -68,7 +68,7 @@ struct pci_platform_pm_ops {
bool (*need_resume)(struct pci_dev *dev);
};
-int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
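
The constification itself is a straightforward hardening cleanup: the platform PM ops table is only read after pci_set_platform_pm() validates it, so marking it const lets the compiler reject accidental writes and place the structure in read-only memory.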

@@ -39,6 +39,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -143,6 +144,10 @@ static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
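
Note that with CONFIG_PM unset the stub above unconditionally returns -EINVAL, the same value the real pm_runtime_get_if_in_use() returns when runtime PM is disabled for a device, so callers that only act on a return value of 1 behave the same in both configurations.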