Merge branch 'power-domains' into for-linus

* power-domains:
  PM: Fix build issue in clock_ops.c for CONFIG_PM_RUNTIME unset
  PM: Revert "driver core: platform_bus: allow runtime override of dev_pm_ops"
  OMAP1 / PM: Use generic clock manipulation routines for runtime PM
  PM / Runtime: Generic clock manipulation routines for runtime PM (v6)
  PM / Runtime: Add subsystem data field to struct dev_pm_info
  OMAP2+ / PM: move runtime PM implementation to use device power domains
  PM / Platform: Use generic runtime PM callbacks directly
  shmobile: Use power domains for platform runtime PM
  PM: Export platform bus type's default PM callbacks
  PM: Make power domain callbacks take precedence over subsystem ones
Rafael J. Wysocki 2011-05-17 23:23:46 +02:00
commit 290c748725
15 changed files with 689 additions and 447 deletions

View File

@@ -24,75 +24,50 @@
#ifdef CONFIG_PM_RUNTIME
static int omap1_pm_runtime_suspend(struct device *dev)
{
struct clk *iclk, *fclk;
int ret = 0;
int ret;
dev_dbg(dev, "%s\n", __func__);
ret = pm_generic_runtime_suspend(dev);
if (ret)
return ret;
fclk = clk_get(dev, "fck");
if (!IS_ERR(fclk)) {
clk_disable(fclk);
clk_put(fclk);
}
iclk = clk_get(dev, "ick");
if (!IS_ERR(iclk)) {
clk_disable(iclk);
clk_put(iclk);
ret = pm_runtime_clk_suspend(dev);
if (ret) {
pm_generic_runtime_resume(dev);
return ret;
}
return 0;
};
}
static int omap1_pm_runtime_resume(struct device *dev)
{
struct clk *iclk, *fclk;
dev_dbg(dev, "%s\n", __func__);
iclk = clk_get(dev, "ick");
if (!IS_ERR(iclk)) {
clk_enable(iclk);
clk_put(iclk);
}
fclk = clk_get(dev, "fck");
if (!IS_ERR(fclk)) {
clk_enable(fclk);
clk_put(fclk);
}
pm_runtime_clk_resume(dev);
return pm_generic_runtime_resume(dev);
}
static struct dev_power_domain default_power_domain = {
.ops = {
.runtime_suspend = omap1_pm_runtime_suspend,
.runtime_resume = omap1_pm_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
},
};
static struct pm_clk_notifier_block platform_bus_notifier = {
.pwr_domain = &default_power_domain,
.con_ids = { "ick", "fck", NULL, },
};
static int __init omap1_pm_runtime_init(void)
{
const struct dev_pm_ops *pm;
struct dev_pm_ops *omap_pm;
if (!cpu_class_is_omap1())
return -ENODEV;
pm = platform_bus_get_pm_ops();
if (!pm) {
pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
__func__);
return -ENODEV;
}
omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
if (!omap_pm) {
pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
__func__);
return -ENOMEM;
}
omap_pm->runtime_suspend = omap1_pm_runtime_suspend;
omap_pm->runtime_resume = omap1_pm_runtime_resume;
platform_bus_set_pm_ops(omap_pm);
pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}

View File

@@ -59,10 +59,10 @@ endif
# Power Management
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
cpuidle34xx.o pm_bus.o
obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o
cpuidle34xx.o
obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o

View File

@@ -1,85 +0,0 @@
/*
* Runtime PM support code for OMAP
*
* Author: Kevin Hilman, Deep Root Systems, LLC
*
* Copyright (C) 2010 Texas Instruments, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
#ifdef CONFIG_PM_RUNTIME
static int omap_pm_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int r, ret = 0;
dev_dbg(dev, "%s\n", __func__);
ret = pm_generic_runtime_suspend(dev);
if (!ret && dev->parent == &omap_device_parent) {
r = omap_device_idle(pdev);
WARN_ON(r);
}
return ret;
};
static int omap_pm_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int r;
dev_dbg(dev, "%s\n", __func__);
if (dev->parent == &omap_device_parent) {
r = omap_device_enable(pdev);
WARN_ON(r);
}
return pm_generic_runtime_resume(dev);
};
#else
#define omap_pm_runtime_suspend NULL
#define omap_pm_runtime_resume NULL
#endif /* CONFIG_PM_RUNTIME */
static int __init omap_pm_runtime_init(void)
{
const struct dev_pm_ops *pm;
struct dev_pm_ops *omap_pm;
pm = platform_bus_get_pm_ops();
if (!pm) {
pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
__func__);
return -ENODEV;
}
omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
if (!omap_pm) {
pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
__func__);
return -ENOMEM;
}
omap_pm->runtime_suspend = omap_pm_runtime_suspend;
omap_pm->runtime_resume = omap_pm_runtime_resume;
platform_bus_set_pm_ops(omap_pm);
return 0;
}
core_initcall(omap_pm_runtime_init);

View File

@@ -18,152 +18,41 @@
#include <linux/clk.h>
#include <linux/sh_clk.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#ifdef CONFIG_PM_RUNTIME
#define BIT_ONCE 0
#define BIT_ACTIVE 1
#define BIT_CLK_ENABLED 2
struct pm_runtime_data {
unsigned long flags;
struct clk *clk;
};
static void __devres_release(struct device *dev, void *res)
{
struct pm_runtime_data *prd = res;
dev_dbg(dev, "__devres_release()\n");
if (test_bit(BIT_CLK_ENABLED, &prd->flags))
clk_disable(prd->clk);
if (test_bit(BIT_ACTIVE, &prd->flags))
clk_put(prd->clk);
}
static struct pm_runtime_data *__to_prd(struct device *dev)
{
return devres_find(dev, __devres_release, NULL, NULL);
}
static void platform_pm_runtime_init(struct device *dev,
struct pm_runtime_data *prd)
{
if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
prd->clk = clk_get(dev, NULL);
if (!IS_ERR(prd->clk)) {
set_bit(BIT_ACTIVE, &prd->flags);
dev_info(dev, "clocks managed by runtime pm\n");
}
}
}
static void platform_pm_runtime_bug(struct device *dev,
struct pm_runtime_data *prd)
{
if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
dev_err(dev, "runtime pm suspend before resume\n");
}
int platform_pm_runtime_suspend(struct device *dev)
{
struct pm_runtime_data *prd = __to_prd(dev);
dev_dbg(dev, "platform_pm_runtime_suspend()\n");
platform_pm_runtime_bug(dev, prd);
if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
clk_disable(prd->clk);
clear_bit(BIT_CLK_ENABLED, &prd->flags);
}
return 0;
}
int platform_pm_runtime_resume(struct device *dev)
{
struct pm_runtime_data *prd = __to_prd(dev);
dev_dbg(dev, "platform_pm_runtime_resume()\n");
platform_pm_runtime_init(dev, prd);
if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
clk_enable(prd->clk);
set_bit(BIT_CLK_ENABLED, &prd->flags);
}
return 0;
}
int platform_pm_runtime_idle(struct device *dev)
static int default_platform_runtime_idle(struct device *dev)
{
/* suspend synchronously to disable clocks immediately */
return pm_runtime_suspend(dev);
}
static int platform_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct pm_runtime_data *prd;
static struct dev_power_domain default_power_domain = {
.ops = {
.runtime_suspend = pm_runtime_clk_suspend,
.runtime_resume = pm_runtime_clk_resume,
.runtime_idle = default_platform_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
},
};
dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain)
if (action == BUS_NOTIFY_BIND_DRIVER) {
prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
if (prd)
devres_add(dev, prd);
else
dev_err(dev, "unable to alloc memory for runtime pm\n");
}
#else
return 0;
}
#else /* CONFIG_PM_RUNTIME */
static int platform_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct clk *clk;
dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
switch (action) {
case BUS_NOTIFY_BIND_DRIVER:
clk = clk_get(dev, NULL);
if (!IS_ERR(clk)) {
clk_enable(clk);
clk_put(clk);
dev_info(dev, "runtime pm disabled, clock forced on\n");
}
break;
case BUS_NOTIFY_UNBOUND_DRIVER:
clk = clk_get(dev, NULL);
if (!IS_ERR(clk)) {
clk_disable(clk);
clk_put(clk);
dev_info(dev, "runtime pm disabled, clock forced off\n");
}
break;
}
return 0;
}
#define DEFAULT_PWR_DOMAIN_PTR NULL
#endif /* CONFIG_PM_RUNTIME */
static struct notifier_block platform_bus_notifier = {
.notifier_call = platform_bus_notify
static struct pm_clk_notifier_block platform_bus_notifier = {
.pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
.con_ids = { NULL, },
};
static int __init sh_pm_runtime_init(void)
{
bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(sh_pm_runtime_init);

View File

@@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od)
return 0;
}
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
return omap_device_idle(pdev);
}
static int _od_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
return omap_device_enable(pdev);
}
static struct dev_power_domain omap_device_power_domain = {
.ops = {
.runtime_suspend = _od_runtime_suspend,
.runtime_resume = _od_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
}
};
/**
* omap_device_register - register an omap_device with one omap_hwmod
* @od: struct omap_device * to register
@@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od)
pr_debug("omap_device: %s: registering\n", od->pdev.name);
od->pdev.dev.parent = &omap_device_parent;
od->pdev.dev.pwr_domain = &omap_device_power_domain;
return platform_device_register(&od->pdev);
}

View File

@@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void)
queue_work(pm_wq, &hwblk_work);
}
int platform_pm_runtime_suspend(struct device *dev)
static int default_platform_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pdev_archdata *ad = &pdev->archdata;
@@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev)
int hwblk = ad->hwblk_id;
int ret = 0;
dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk);
dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
/* ignore off-chip platform devices */
if (!hwblk)
@@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev)
mutex_unlock(&ad->mutex);
out:
dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n",
hwblk, ret);
dev_dbg(dev, "%s() [%d] returns %d\n",
__func__, hwblk, ret);
return ret;
}
int platform_pm_runtime_resume(struct device *dev)
static int default_platform_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pdev_archdata *ad = &pdev->archdata;
int hwblk = ad->hwblk_id;
int ret = 0;
dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk);
dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
/* ignore off-chip platform devices */
if (!hwblk)
@@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev)
*/
mutex_unlock(&ad->mutex);
out:
dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n",
hwblk, ret);
dev_dbg(dev, "%s() [%d] returns %d\n",
__func__, hwblk, ret);
return ret;
}
int platform_pm_runtime_idle(struct device *dev)
static int default_platform_runtime_idle(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int hwblk = pdev->archdata.hwblk_id;
int ret = 0;
dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk);
dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
/* ignore off-chip platform devices */
if (!hwblk)
@@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev)
/* suspend synchronously to disable clocks immediately */
ret = pm_runtime_suspend(dev);
out:
dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk);
dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk);
return ret;
}
static struct dev_power_domain default_power_domain = {
.ops = {
.runtime_suspend = default_platform_runtime_suspend,
.runtime_resume = default_platform_runtime_resume,
.runtime_idle = default_platform_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
},
};
static int platform_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
hwblk_disable(hwblk_info, hwblk);
/* make sure driver re-inits itself once */
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
dev->pwr_domain = &default_power_domain;
break;
/* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
case BUS_NOTIFY_BOUND_DRIVER:
@@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
break;
case BUS_NOTIFY_DEL_DEVICE:
dev->pwr_domain = NULL;
break;
}
return 0;

View File

@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev)
return ret;
}
static int platform_pm_prepare(struct device *dev)
int platform_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev)
return ret;
}
static void platform_pm_complete(struct device *dev)
void platform_pm_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev)
drv->pm->complete(dev);
}
#else /* !CONFIG_PM_SLEEP */
#define platform_pm_prepare NULL
#define platform_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
int __weak platform_pm_suspend(struct device *dev)
int platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev)
return ret;
}
int __weak platform_pm_suspend_noirq(struct device *dev)
int platform_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev)
return ret;
}
int __weak platform_pm_resume(struct device *dev)
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev)
return ret;
}
int __weak platform_pm_resume_noirq(struct device *dev)
int platform_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev)
return ret;
}
#else /* !CONFIG_SUSPEND */
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
#define platform_pm_suspend_noirq NULL
#define platform_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int platform_pm_freeze(struct device *dev)
int platform_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev)
return ret;
}
static int platform_pm_freeze_noirq(struct device *dev)
int platform_pm_freeze_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev)
return ret;
}
static int platform_pm_thaw(struct device *dev)
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev)
return ret;
}
static int platform_pm_thaw_noirq(struct device *dev)
int platform_pm_thaw_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev)
return ret;
}
static int platform_pm_poweroff(struct device *dev)
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev)
return ret;
}
static int platform_pm_poweroff_noirq(struct device *dev)
int platform_pm_poweroff_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev)
return ret;
}
static int platform_pm_restore(struct device *dev)
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev)
return ret;
}
static int platform_pm_restore_noirq(struct device *dev)
int platform_pm_restore_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev)
return ret;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
#define platform_pm_freeze_noirq NULL
#define platform_pm_thaw_noirq NULL
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
int __weak platform_pm_runtime_suspend(struct device *dev)
{
return pm_generic_runtime_suspend(dev);
};
int __weak platform_pm_runtime_resume(struct device *dev)
{
return pm_generic_runtime_resume(dev);
};
int __weak platform_pm_runtime_idle(struct device *dev)
{
return pm_generic_runtime_idle(dev);
};
#else /* !CONFIG_PM_RUNTIME */
#define platform_pm_runtime_suspend NULL
#define platform_pm_runtime_resume NULL
#define platform_pm_runtime_idle NULL
#endif /* !CONFIG_PM_RUNTIME */
#endif /* CONFIG_HIBERNATE_CALLBACKS */
static const struct dev_pm_ops platform_dev_pm_ops = {
.prepare = platform_pm_prepare,
.complete = platform_pm_complete,
.suspend = platform_pm_suspend,
.resume = platform_pm_resume,
.freeze = platform_pm_freeze,
.thaw = platform_pm_thaw,
.poweroff = platform_pm_poweroff,
.restore = platform_pm_restore,
.suspend_noirq = platform_pm_suspend_noirq,
.resume_noirq = platform_pm_resume_noirq,
.freeze_noirq = platform_pm_freeze_noirq,
.thaw_noirq = platform_pm_thaw_noirq,
.poweroff_noirq = platform_pm_poweroff_noirq,
.restore_noirq = platform_pm_restore_noirq,
.runtime_suspend = platform_pm_runtime_suspend,
.runtime_resume = platform_pm_runtime_resume,
.runtime_idle = platform_pm_runtime_idle,
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
.runtime_idle = pm_generic_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
};
struct bus_type platform_bus_type = {
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
/**
* platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops
*
* This function can be used by platform code to get the current
* set of dev_pm_ops functions used by the platform_bus_type.
*/
const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
{
return platform_bus_type.pm;
}
/**
* platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
*
* @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
*
* Platform code can override the dev_pm_ops methods of
* platform_bus_type by using this function. It is expected that
* platform code will first do a platform_bus_get_pm_ops(), then
* kmemdup it, then customize selected methods and pass a pointer to
* the new struct dev_pm_ops to this function.
*
* Since platform-specific code is customizing methods for *all*
* devices (not just platform-specific devices) it is expected that
* any custom overrides of these functions will keep existing behavior
* and simply extend it. For example, any customization of the
* runtime PM methods should continue to call the pm_generic_*
* functions as the default ones do in addition to the
* platform-specific behavior.
*/
void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
{
platform_bus_type.pm = pm;
}
int __init platform_bus_init(void)
{
int error;

View File

@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG

View File

@@ -0,0 +1,431 @@
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
*
* This file is released under the GPLv2.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#ifdef CONFIG_PM_RUNTIME
struct pm_runtime_clk_data {
struct list_head clock_list;
struct mutex lock;
};
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
struct pm_clock_entry {
struct list_head node;
char *con_id;
struct clk *clk;
enum pce_status status;
};
static struct pm_runtime_clk_data *__to_prd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}
/**
* pm_runtime_clk_add - Start using a device clock for runtime PM.
* @dev: Device whose clock is going to be used for runtime PM.
* @con_id: Connection ID of the clock.
*
* Add the clock represented by @con_id to the list of clocks used for
* the runtime PM of @dev.
*/
int pm_runtime_clk_add(struct device *dev, const char *con_id)
{
struct pm_runtime_clk_data *prd = __to_prd(dev);
struct pm_clock_entry *ce;
if (!prd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce) {
dev_err(dev, "Not enough memory for clock entry.\n");
return -ENOMEM;
}
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
if (!ce->con_id) {
dev_err(dev,
"Not enough memory for clock connection ID.\n");
kfree(ce);
return -ENOMEM;
}
}
mutex_lock(&prd->lock);
list_add_tail(&ce->node, &prd->clock_list);
mutex_unlock(&prd->lock);
return 0;
}
/**
* __pm_runtime_clk_remove - Destroy runtime PM clock entry.
* @ce: Runtime PM clock entry to destroy.
*
* This routine must be called under the mutex protecting the runtime PM list
* of clocks corresponding to the @ce's device.
*/
static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
list_del(&ce->node);
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
clk_disable(ce->clk);
if (ce->status >= PCE_STATUS_ACQUIRED)
clk_put(ce->clk);
}
if (ce->con_id)
kfree(ce->con_id);
kfree(ce);
}
/**
* pm_runtime_clk_remove - Stop using a device clock for runtime PM.
* @dev: Device whose clock should not be used for runtime PM any more.
* @con_id: Connection ID of the clock.
*
* Remove the clock represented by @con_id from the list of clocks used for
* the runtime PM of @dev.
*/
void pm_runtime_clk_remove(struct device *dev, const char *con_id)
{
struct pm_runtime_clk_data *prd = __to_prd(dev);
struct pm_clock_entry *ce;
if (!prd)
return;
mutex_lock(&prd->lock);
list_for_each_entry(ce, &prd->clock_list, node) {
if (!con_id && !ce->con_id) {
__pm_runtime_clk_remove(ce);
break;
} else if (!con_id || !ce->con_id) {
continue;
} else if (!strcmp(con_id, ce->con_id)) {
__pm_runtime_clk_remove(ce);
break;
}
}
mutex_unlock(&prd->lock);
}
/**
* pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
* @dev: Device to initialize the list of runtime PM clocks for.
*
* Allocate a struct pm_runtime_clk_data object, initialize its lock member and
* make the @dev's power.subsys_data field point to it.
*/
int pm_runtime_clk_init(struct device *dev)
{
struct pm_runtime_clk_data *prd;
prd = kzalloc(sizeof(*prd), GFP_KERNEL);
if (!prd) {
dev_err(dev, "Not enough memory for runtime PM data.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&prd->clock_list);
mutex_init(&prd->lock);
dev->power.subsys_data = prd;
return 0;
}
/**
* pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
* @dev: Device to destroy the list of runtime PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
* from the struct pm_runtime_clk_data object pointed to by it before and free
* that object.
*/
void pm_runtime_clk_destroy(struct device *dev)
{
struct pm_runtime_clk_data *prd = __to_prd(dev);
struct pm_clock_entry *ce, *c;
if (!prd)
return;
dev->power.subsys_data = NULL;
mutex_lock(&prd->lock);
list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
__pm_runtime_clk_remove(ce);
mutex_unlock(&prd->lock);
kfree(prd);
}
/**
* pm_runtime_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: Runtime PM clock entry holding the connection ID of the clock.
*/
static void pm_runtime_clk_acquire(struct device *dev,
struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
}
/**
* pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
* @dev: Device to disable the clocks for.
*/
int pm_runtime_clk_suspend(struct device *dev)
{
struct pm_runtime_clk_data *prd = __to_prd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
if (!prd)
return 0;
mutex_lock(&prd->lock);
list_for_each_entry_reverse(ce, &prd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
pm_runtime_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
}
}
mutex_unlock(&prd->lock);
return 0;
}
/**
* pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
* @dev: Device to enable the clocks for.
*/
int pm_runtime_clk_resume(struct device *dev)
{
struct pm_runtime_clk_data *prd = __to_prd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
if (!prd)
return 0;
mutex_lock(&prd->lock);
list_for_each_entry(ce, &prd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
pm_runtime_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;
}
}
mutex_unlock(&prd->lock);
return 0;
}
/**
* pm_runtime_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
* Specifically, the pwr_domain member of that object is copied to the device's
* pwr_domain field and its con_ids member is used to populate the device's list
* of runtime PM clocks, depending on @action.
*
* If the device's pwr_domain field is already populated with a value different
* from the one stored in the struct pm_clk_notifier_block object, the function
* does nothing.
*/
static int pm_runtime_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
char **con_id;
int error;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (dev->pwr_domain)
break;
error = pm_runtime_clk_init(dev);
if (error)
break;
dev->pwr_domain = clknb->pwr_domain;
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
pm_runtime_clk_add(dev, *con_id);
} else {
pm_runtime_clk_add(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
if (dev->pwr_domain != clknb->pwr_domain)
break;
dev->pwr_domain = NULL;
pm_runtime_clk_destroy(dev);
break;
}
return 0;
}
#else /* !CONFIG_PM_RUNTIME */
/**
* enable_clock - Enable a device clock.
* @dev: Device whose clock is to be enabled.
* @con_id: Connection ID of the clock.
*/
static void enable_clock(struct device *dev, const char *con_id)
{
struct clk *clk;
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
clk_enable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
}
/**
* disable_clock - Disable a device clock.
* @dev: Device whose clock is to be disabled.
* @con_id: Connection ID of the clock.
*/
static void disable_clock(struct device *dev, const char *con_id)
{
struct clk *clk;
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
clk_disable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
}
/**
* pm_runtime_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
* Specifically, the con_ids member of that object is used to enable or disable
* the device's clocks, depending on @action.
*/
static int pm_runtime_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
char **con_id;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
} else {
enable_clock(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);
} else {
disable_clock(dev, NULL);
}
break;
}
return 0;
}
#endif /* !CONFIG_PM_RUNTIME */
/**
* pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
* @bus: Bus type to add the notifier to.
* @clknb: Notifier to be added to the given bus type.
*
* The nb member of @clknb is not expected to be initialized and its
* notifier_call member will be replaced with pm_runtime_clk_notify(). However,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
void pm_runtime_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
return;
clknb->nb.notifier_call = pm_runtime_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
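
For reference, a minimal wiring sketch for the new interface, modeled on the shmobile and OMAP1 glue earlier in this merge (not part of the commit; the my_soc_* names and the "ick"/"fck" connection IDs are illustrative):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Power domain whose runtime callbacks simply gate the clocks registered below. */
static struct dev_power_domain my_soc_power_domain = {
        .ops = {
                .runtime_suspend = pm_runtime_clk_suspend,
                .runtime_resume = pm_runtime_clk_resume,
                USE_PLATFORM_PM_SLEEP_OPS
        },
};

/* Every device added to the platform bus gets the domain above and has its
 * "ick"/"fck" clocks managed by runtime PM. */
static struct pm_clk_notifier_block my_soc_clk_notifier = {
        .pwr_domain = &my_soc_power_domain,
        .con_ids = { "ick", "fck", NULL, },
};

static int __init my_soc_pm_runtime_init(void)
{
        pm_runtime_clk_add_notifier(&platform_bus_type, &my_soc_clk_notifier);
        return 0;
}
core_initcall(my_soc_pm_runtime_init);

A platform that also needs the drivers' own runtime callbacks can instead wrap pm_runtime_clk_suspend()/pm_runtime_clk_resume() together with pm_generic_runtime_suspend()/pm_generic_runtime_resume() in its domain callbacks, as the OMAP1 code above does.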

View File

@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "EARLY power domain ");
pm_noirq_op(dev, &dev->pwr_domain->ops, state);
}
if (dev->type && dev->type->pm) {
error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
pm_op(dev, &dev->pwr_domain->ops, state);
error = pm_op(dev, &dev->pwr_domain->ops, state);
goto End;
}
if (dev->type && dev->type->pm) {
@@ -629,12 +628,11 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "completing power domain ");
dev->pwr_domain->ops.complete(dev);
}
if (dev->type && dev->type->pm) {
if (dev->pwr_domain->ops.complete)
dev->pwr_domain->ops.complete(dev);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
if (dev->type->pm->complete)
dev->type->pm->complete(dev);
@@ -732,7 +730,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error;
if (dev->type && dev->type->pm) {
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "LATE power domain ");
error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
if (error)
return error;
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "LATE type ");
error = pm_noirq_op(dev, dev->type->pm, state);
if (error)
@@ -749,11 +752,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
return error;
}
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "LATE power domain ");
pm_noirq_op(dev, &dev->pwr_domain->ops, state);
}
return 0;
}
@@ -841,21 +839,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto End;
}
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
error = pm_op(dev, &dev->pwr_domain->ops, state);
goto End;
}
if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "type ");
error = pm_op(dev, dev->type->pm, state);
goto Domain;
goto End;
}
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
goto Domain;
goto End;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
goto Domain;
goto End;
}
}
@@ -869,12 +873,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
}
Domain:
if (!error && dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
pm_op(dev, &dev->pwr_domain->ops, state);
}
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -965,7 +963,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
if (dev->type && dev->type->pm) {
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
if (dev->pwr_domain->ops.prepare)
error = dev->pwr_domain->ops.prepare(dev);
suspend_report_result(dev->pwr_domain->ops.prepare, error);
if (error)
goto End;
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "preparing type ");
if (dev->type->pm->prepare)
error = dev->type->pm->prepare(dev);
@@ -984,13 +989,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->bus->pm->prepare)
error = dev->bus->pm->prepare(dev);
suspend_report_result(dev->bus->pm->prepare, error);
if (error)
goto End;
}
if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
pm_dev_dbg(dev, state, "preparing power domain ");
dev->pwr_domain->ops.prepare(dev);
}
End:

View File

@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int rpm_idle(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
int (*domain_callback)(struct device *);
int retval;
retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
if (dev->type && dev->type->pm)
if (dev->pwr_domain)
callback = dev->pwr_domain->ops.runtime_idle;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
if (dev->pwr_domain)
domain_callback = dev->pwr_domain->ops.runtime_idle;
else
domain_callback = NULL;
if (callback || domain_callback) {
if (callback) {
spin_unlock_irq(&dev->power.lock);
if (domain_callback)
retval = domain_callback(dev);
if (!retval && callback)
callback(dev);
callback(dev);
spin_lock_irq(&dev->power.lock);
}
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
if (dev->type && dev->type->pm)
if (dev->pwr_domain)
callback = dev->pwr_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
pm_runtime_cancel_pending(dev);
} else {
if (dev->pwr_domain)
rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pwr_domain)
rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
if (dev->type && dev->type->pm)
callback = dev->pwr_domain->ops.runtime_resume;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_resume;

View File

@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr
struct resource *res, unsigned int n_res,
const void *data, size_t size);
extern const struct dev_pm_ops * platform_bus_get_pm_ops(void);
extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm);
/* early platform driver interface */
struct early_platform_driver {
const char *class_str;
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void) \
}
#endif /* MODULE */
#ifdef CONFIG_PM_SLEEP
extern int platform_pm_prepare(struct device *dev);
extern void platform_pm_complete(struct device *dev);
#else
#define platform_pm_prepare NULL
#define platform_pm_complete NULL
#endif
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
extern int platform_pm_suspend_noirq(struct device *dev);
extern int platform_pm_resume(struct device *dev);
extern int platform_pm_resume_noirq(struct device *dev);
#else
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
#define platform_pm_suspend_noirq NULL
#define platform_pm_resume_noirq NULL
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern int platform_pm_freeze(struct device *dev);
extern int platform_pm_freeze_noirq(struct device *dev);
extern int platform_pm_thaw(struct device *dev);
extern int platform_pm_thaw_noirq(struct device *dev);
extern int platform_pm_poweroff(struct device *dev);
extern int platform_pm_poweroff_noirq(struct device *dev);
extern int platform_pm_restore(struct device *dev);
extern int platform_pm_restore_noirq(struct device *dev);
#else
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
#define platform_pm_freeze_noirq NULL
#define platform_pm_thaw_noirq NULL
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
#endif
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.prepare = platform_pm_prepare, \
.complete = platform_pm_complete, \
.suspend = platform_pm_suspend, \
.resume = platform_pm_resume, \
.freeze = platform_pm_freeze, \
.thaw = platform_pm_thaw, \
.poweroff = platform_pm_poweroff, \
.restore = platform_pm_restore, \
.suspend_noirq = platform_pm_suspend_noirq, \
.resume_noirq = platform_pm_resume_noirq, \
.freeze_noirq = platform_pm_freeze_noirq, \
.thaw_noirq = platform_pm_thaw_noirq, \
.poweroff_noirq = platform_pm_poweroff_noirq, \
.restore_noirq = platform_pm_restore_noirq,
#else
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
#endif /* _PLATFORM_DEVICE_H_ */

View File

@@ -460,6 +460,7 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
void *subsys_data; /* Owned by the subsystem. */
#endif
};

View File

@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
__pm_runtime_use_autosuspend(dev, false);
}
struct pm_clk_notifier_block {
struct notifier_block nb;
struct dev_power_domain *pwr_domain;
char *con_ids[];
};
#ifdef CONFIG_PM_RUNTIME_CLK
extern int pm_runtime_clk_init(struct device *dev);
extern void pm_runtime_clk_destroy(struct device *dev);
extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
extern void pm_runtime_clk_remove(struct device *dev, const char *con_id);
extern int pm_runtime_clk_suspend(struct device *dev);
extern int pm_runtime_clk_resume(struct device *dev);
#else
static inline int pm_runtime_clk_init(struct device *dev)
{
return -EINVAL;
}
static inline void pm_runtime_clk_destroy(struct device *dev)
{
}
static inline int pm_runtime_clk_add(struct device *dev, const char *con_id)
{
return -EINVAL;
}
static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id)
{
}
#define pm_runtime_clk_suspend NULL
#define pm_runtime_clk_resume NULL
#endif
#ifdef CONFIG_HAVE_CLK
extern void pm_runtime_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
static inline void pm_runtime_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
#endif
#endif
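
To illustrate the declarations above, here is a hypothetical bus glue using the per-device helpers directly instead of the bus notifier (a sketch only: the my_bus_* names and the "fck" connection ID are illustrative, and error handling is trimmed). It mirrors what pm_runtime_clk_notify() does for BUS_NOTIFY_ADD_DEVICE and BUS_NOTIFY_DEL_DEVICE:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* A dev_power_domain built on pm_runtime_clk_suspend()/pm_runtime_clk_resume(),
 * e.g. like the one sketched after clock_ops.c above. */
extern struct dev_power_domain my_bus_power_domain;

static int my_bus_add_device(struct device *dev)
{
        int error;

        error = pm_runtime_clk_init(dev);       /* allocates dev->power.subsys_data */
        if (error)
                return error;

        pm_runtime_clk_add(dev, "fck");         /* this clock is gated on runtime suspend */
        dev->pwr_domain = &my_bus_power_domain;
        return 0;
}

static void my_bus_del_device(struct device *dev)
{
        dev->pwr_domain = NULL;
        pm_runtime_clk_destroy(dev);            /* removes and frees every clock entry */
}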

View File

@@ -229,3 +229,7 @@ config PM_OPP
representing individual voltage domains and provides SOC
implementations a ready to use framework to manage OPPs.
For more information, read <file:Documentation/power/opp.txt>
config PM_RUNTIME_CLK
def_bool y
depends on PM_RUNTIME && HAVE_CLK