Merge tag 'pmdomain-v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm

Pull pmdomain updates from Ulf Hansson:
 "pmdomain core:
   - Add support for s2idle for CPU PM domains on PREEMPT_RT
   - Add device managed version of dev_pm_domain_attach|detach_list()
   - Improve layout of the debugfs summary table

  pmdomain providers:
   - amlogic: Remove obsolete vpu domain driver
   - bcm: raspberrypi: Add support for devices used as wakeup-sources
   - imx: Fixup clock handling for imx93 at driver remove
   - rockchip: Add gating support for RK3576
   - rockchip: Add support for RK3576 SoC
   - Some OF parsing simplifications
   - Some simplifications by using dev_err_probe() and guard()

  pmdomain consumers:
   - qcom/media/venus: Convert to the device managed APIs for PM domains

  cpuidle-psci:
   - Add support for s2idle/s2ram for the hierarchical topology on
     PREEMPT_RT
   - Some OF parsing simplifications"

* tag 'pmdomain-v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm: (39 commits)
  pmdomain: core: Reduce debug summary table width
  pmdomain: core: Move mode_status_str()
  pmdomain: core: Fix "managed by" alignment in debug summary
  pmdomain: core: Harden inter-column space in debug summary
  pmdomain: rockchip: Add gating masks for rk3576
  pmdomain: rockchip: Add gating support
  pmdomain: rockchip: Simplify dropping OF node reference
  pmdomain: mediatek: make use of dev_err_cast_probe()
  pmdomain: imx93-pd: drop the context variable "init_off"
  pmdomain: imx93-pd: don't unprepare clocks on driver remove
  pmdomain: imx93-pd: replace dev_err() with dev_err_probe()
  pmdomain: qcom: rpmpd: Simplify locking with guard()
  pmdomain: qcom: rpmhpd: Simplify locking with guard()
  pmdomain: qcom: cpr: Simplify locking with guard()
  pmdomain: qcom: cpr: Simplify with dev_err_probe()
  pmdomain: imx: gpcv2: Simplify with scoped for each OF child loop
  pmdomain: imx: gpc: Simplify with scoped for each OF child loop
  pmdomain: rockchip: Simplify locking with guard()
  pmdomain: rockchip: Simplify with scoped for each OF child loop
  pmdomain: qcom-cpr: Use scope based of_node_put() to simplify code.
  ...
Linus Torvalds 2024-09-18 10:49:45 +02:00
commit 200289db26
22 changed files with 378 additions and 608 deletions

View File

@ -41,6 +41,7 @@ properties:
- rockchip,rk3368-power-controller
- rockchip,rk3399-power-controller
- rockchip,rk3568-power-controller
- rockchip,rk3576-power-controller
- rockchip,rk3588-power-controller
- rockchip,rv1126-power-controller

View File

@ -276,6 +276,51 @@ err_attach:
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
/**
* devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list.
* @_list: The list of PM domains to detach.
*
* This function reverses the actions from devm_pm_domain_attach_list().
* It will be invoked implicitly during the remove phase if the driver
* uses devm_pm_domain_attach_list() to attach the PM domains.
*/
static void devm_pm_domain_detach_list(void *_list)
{
struct dev_pm_domain_list *list = _list;
dev_pm_domain_detach_list(list);
}
/**
* devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list
* @dev: The device used to lookup the PM domains for.
* @data: The data used for attaching to the PM domains.
* @list: An out-parameter with an allocated list of attached PM domains.
*
* NOTE: this will also handle calling devm_pm_domain_detach_list() for
* you during remove phase.
*
* Returns the number of attached PM domains or a negative error code in case of
* a failure.
*/
int devm_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list)
{
int ret, num_pds;
num_pds = dev_pm_domain_attach_list(dev, data, list);
if (num_pds <= 0)
return num_pds;
ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
if (ret)
return ret;
return num_pds;
}
EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
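
As a usage illustration (not part of this series), here is a minimal consumer-side sketch of the new devm_pm_domain_attach_list() helper. The driver, domain names and flags below are hypothetical; the point is that the matching detach now happens automatically when the driver is removed.

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int example_probe(struct platform_device *pdev)
{
	/* Hypothetical power-domain names consumed by this device. */
	static const char * const example_pd_names[] = { "mx", "cx" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = example_pd_names,
		.num_pd_names = ARRAY_SIZE(example_pd_names),
		.pd_flags = PD_FLAG_NO_DEV_LINK,
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	/* Attach all listed domains; devres detaches them again on remove. */
	ret = devm_pm_domain_attach_list(&pdev->dev, &pd_data, &pd_list);
	if (ret < 0)
		return ret;

	/* Virtual devices for each attached domain sit in pd_list->pd_devs[]. */
	return 0;
}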

View File

@ -67,12 +67,16 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
/*
* Allow power off when OSI has been successfully enabled.
* PREEMPT_RT is not yet ready to enter domain idle states.
* On a PREEMPT_RT based configuration the domain idle states are
* supported, but only during system-wide suspend.
*/
if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
if (use_osi) {
pd->power_off = psci_pd_power_off;
else
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
pd->flags |= GENPD_FLAG_ALWAYS_ON;
}
/* Use governor for CPU PM domains if it has some states to manage. */
pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;
@ -138,7 +142,6 @@ static const struct of_device_id psci_of_match[] = {
static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *node;
bool use_osi = psci_has_osi_support();
int ret = 0, pd_count = 0;
@ -149,15 +152,13 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
* Parse child nodes for the "#power-domain-cells" property and
* initialize a genpd/genpd-of-provider pair when it's found.
*/
for_each_child_of_node(np, node) {
for_each_child_of_node_scoped(np, node) {
if (!of_property_present(node, "#power-domain-cells"))
continue;
ret = psci_pd_init(node, use_osi);
if (ret) {
of_node_put(node);
if (ret)
goto exit;
}
pd_count++;
}

View File

@ -37,6 +37,7 @@ struct psci_cpuidle_data {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(u32 state)
@ -166,6 +167,12 @@ static struct syscore_ops psci_idle_syscore_ops = {
.resume = psci_idle_syscore_resume,
};
static void psci_idle_init_syscore(void)
{
if (psci_cpuidle_use_syscore)
register_syscore_ops(&psci_idle_syscore_ops);
}
static void psci_idle_init_cpuhp(void)
{
int err;
@ -173,8 +180,6 @@ static void psci_idle_init_cpuhp(void)
if (!psci_cpuidle_use_cpuhp)
return;
register_syscore_ops(&psci_idle_syscore_ops);
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@ -222,22 +227,23 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (!psci_has_osi_support())
return 0;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
return 0;
data->dev = dt_idle_attach_cpu(cpu, "psci");
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
psci_cpuidle_use_syscore = true;
/*
* Using the deepest state for the CPU to trigger a potential selection
* of a shared state for the domain, assumes the domain states are all
* deeper states.
* deeper states. On PREEMPT_RT the hierarchical topology is limited to
* s2ram and s2idle.
*/
drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
psci_cpuidle_use_cpuhp = true;
}
return 0;
}
@ -313,6 +319,7 @@ static void psci_cpu_deinit_idle(int cpu)
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_syscore = false;
psci_cpuidle_use_cpuhp = false;
}
@ -409,6 +416,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev)
goto out_fail;
}
psci_idle_init_syscore();
psci_idle_init_cpuhp();
return 0;

View File

@ -130,11 +130,10 @@ out:
int dt_idle_pd_init_topology(struct device_node *np)
{
struct device_node *node;
struct of_phandle_args child, parent;
int ret;
for_each_child_of_node(np, node) {
for_each_child_of_node_scoped(np, node) {
if (of_parse_phandle_with_args(node, "power-domains",
"#power-domain-cells", 0, &parent))
continue;
@ -143,22 +142,19 @@ int dt_idle_pd_init_topology(struct device_node *np)
child.args_count = 0;
ret = of_genpd_add_subdomain(&parent, &child);
of_node_put(parent.np);
if (ret) {
of_node_put(node);
if (ret)
return ret;
}
}
return 0;
}
int dt_idle_pd_remove_topology(struct device_node *np)
{
struct device_node *node;
struct of_phandle_args child, parent;
int ret;
for_each_child_of_node(np, node) {
for_each_child_of_node_scoped(np, node) {
if (of_parse_phandle_with_args(node, "power-domains",
"#power-domain-cells", 0, &parent))
continue;
@ -167,11 +163,9 @@ int dt_idle_pd_remove_topology(struct device_node *np)
child.args_count = 0;
ret = of_genpd_remove_subdomain(&parent, &child);
of_node_put(parent.np);
if (ret) {
of_node_put(node);
if (ret)
return ret;
}
}
return 0;
}
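
For reference, a minimal sketch of the for_each_child_of_node_scoped() pattern adopted above; the helper names and the property being checked are illustrative only. The scoped iterator owns the child reference, so early returns no longer need an explicit of_node_put().

#include <linux/of.h>

/* Hypothetical per-child hook, present only to create an early-return path. */
static int example_child_init(struct device_node *np)
{
	return of_property_present(np, "reg") ? 0 : -EINVAL;
}

static int example_init_children(struct device_node *parent)
{
	int ret;

	for_each_child_of_node_scoped(parent, child) {
		if (!of_property_present(child, "#power-domain-cells"))
			continue;

		ret = example_child_init(child);
		if (ret)
			return ret;	/* child reference dropped automatically */
	}

	return 0;
}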

View File

@ -876,7 +876,7 @@ static int vcodec_domains_get(struct venus_core *core)
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains);
ret = devm_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains);
if (ret < 0)
return ret;
@ -902,14 +902,11 @@ skip_pmdomains:
return 0;
opp_attach_err:
dev_pm_domain_detach_list(core->pmdomains);
return ret;
}
static void vcodec_domains_put(struct venus_core *core)
{
dev_pm_domain_detach_list(core->pmdomains);
if (!core->has_opp_table)
return;

View File

@ -1,17 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Amlogic PM Domains"
config MESON_GX_PM_DOMAINS
tristate "Amlogic Meson GX Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
help
Say yes to expose Amlogic Meson GX Power Domains as
Generic Power Domains.
config MESON_EE_PM_DOMAINS
tristate "Amlogic Meson Everything-Else Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST

View File

@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o

View File

@ -1,380 +0,0 @@
/*
* Copyright (c) 2017 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/module.h>
/* AO Offsets */
#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
#define GEN_PWR_VPU_HDMI BIT(8)
#define GEN_PWR_VPU_HDMI_ISO BIT(9)
/* HHI Offsets */
#define HHI_MEM_PD_REG0 (0x40 << 2)
#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
struct meson_gx_pwrc_vpu {
struct generic_pm_domain genpd;
struct regmap *regmap_ao;
struct regmap *regmap_hhi;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
};
static inline
struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
{
return container_of(d, struct meson_gx_pwrc_vpu, genpd);
}
static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
udelay(20);
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), BIT(i));
udelay(5);
}
udelay(20);
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
msleep(20);
clk_disable_unprepare(pd->vpu_clk);
clk_disable_unprepare(pd->vapb_clk);
return 0;
}
static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
udelay(20);
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), BIT(i));
udelay(5);
}
udelay(20);
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
msleep(20);
clk_disable_unprepare(pd->vpu_clk);
clk_disable_unprepare(pd->vapb_clk);
return 0;
}
static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
{
int ret;
ret = clk_prepare_enable(pd->vpu_clk);
if (ret)
return ret;
ret = clk_prepare_enable(pd->vapb_clk);
if (ret)
clk_disable_unprepare(pd->vpu_clk);
return ret;
}
static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int ret;
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, 0);
udelay(20);
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), 0);
udelay(5);
}
udelay(20);
ret = reset_control_assert(pd->rstc);
if (ret)
return ret;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, 0);
ret = reset_control_deassert(pd->rstc);
if (ret)
return ret;
ret = meson_gx_pwrc_vpu_setup_clk(pd);
if (ret)
return ret;
return 0;
}
static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
{
struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
int ret;
int i;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI, 0);
udelay(20);
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
0x3 << i, 0);
udelay(5);
}
for (i = 8; i < 16; i++) {
regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
BIT(i), 0);
udelay(5);
}
udelay(20);
ret = reset_control_assert(pd->rstc);
if (ret)
return ret;
regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VPU_HDMI_ISO, 0);
ret = reset_control_deassert(pd->rstc);
if (ret)
return ret;
ret = meson_gx_pwrc_vpu_setup_clk(pd);
if (ret)
return ret;
return 0;
}
static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
{
u32 reg;
regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
return (reg & GEN_PWR_VPU_HDMI);
}
static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
.genpd = {
.name = "vpu_hdmi",
.power_off = meson_gx_pwrc_vpu_power_off,
.power_on = meson_gx_pwrc_vpu_power_on,
},
};
static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = {
.genpd = {
.name = "vpu_hdmi",
.power_off = meson_g12a_pwrc_vpu_power_off,
.power_on = meson_g12a_pwrc_vpu_power_on,
},
};
static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
{
const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
struct meson_gx_pwrc_vpu *vpu_pd;
struct device_node *parent_np;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
bool powered_off;
int ret;
vpu_pd_match = of_device_get_match_data(&pdev->dev);
if (!vpu_pd_match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL);
if (!vpu_pd)
return -ENOMEM;
memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
parent_np = of_get_parent(pdev->dev.of_node);
regmap_ao = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
return PTR_ERR(regmap_ao);
}
regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"amlogic,hhi-sysctrl");
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
}
rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get reset lines\n");
vpu_clk = devm_clk_get(&pdev->dev, "vpu");
if (IS_ERR(vpu_clk)) {
dev_err(&pdev->dev, "vpu clock request failed\n");
return PTR_ERR(vpu_clk);
}
vapb_clk = devm_clk_get(&pdev->dev, "vapb");
if (IS_ERR(vapb_clk)) {
dev_err(&pdev->dev, "vapb clock request failed\n");
return PTR_ERR(vapb_clk);
}
vpu_pd->regmap_ao = regmap_ao;
vpu_pd->regmap_hhi = regmap_hhi;
vpu_pd->rstc = rstc;
vpu_pd->vpu_clk = vpu_clk;
vpu_pd->vapb_clk = vapb_clk;
platform_set_drvdata(pdev, vpu_pd);
powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
/* If already powered, sync the clock states */
if (!powered_off) {
ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd);
if (ret)
return ret;
}
vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
pm_genpd_init(&vpu_pd->genpd, NULL, powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
&vpu_pd->genpd);
}
static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
{
struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev);
bool powered_off;
powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
if (!powered_off)
vpu_pd->genpd.power_off(&vpu_pd->genpd);
}
static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
{ .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd },
{
.compatible = "amlogic,meson-g12a-pwrc-vpu",
.data = &vpu_hdmi_pd_g12a
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
static struct platform_driver meson_gx_pwrc_vpu_driver = {
.probe = meson_gx_pwrc_vpu_probe,
.shutdown = meson_gx_pwrc_vpu_shutdown,
.driver = {
.name = "meson_gx_pwrc_vpu",
.of_match_table = meson_gx_pwrc_vpu_match_table,
},
};
module_platform_driver(meson_gx_pwrc_vpu_driver);
MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver");
MODULE_LICENSE("GPL v2");

View File

@ -177,7 +177,7 @@ static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned
return !!(reg & APPLE_PMGR_RESET);
}
const struct reset_control_ops apple_pmgr_reset_ops = {
static const struct reset_control_ops apple_pmgr_reset_ops = {
.assert = apple_pmgr_reset_assert,
.deassert = apple_pmgr_reset_deassert,
.reset = apple_pmgr_reset_reset,

View File

@ -41,40 +41,46 @@ struct rpi_power_domains {
*/
struct rpi_power_domain_packet {
u32 domain;
u32 on;
u32 state;
};
/*
* Asks the firmware to enable or disable power on a specific power
* domain.
*/
static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on)
static int rpi_firmware_set_power(struct generic_pm_domain *domain, bool on)
{
struct rpi_power_domain *rpi_domain =
container_of(domain, struct rpi_power_domain, base);
bool old_interface = rpi_domain->old_interface;
struct rpi_power_domain_packet packet;
int ret;
packet.domain = rpi_domain->domain;
packet.on = on;
return rpi_firmware_property(rpi_domain->fw,
rpi_domain->old_interface ?
packet.state = on;
ret = rpi_firmware_property(rpi_domain->fw, old_interface ?
RPI_FIRMWARE_SET_POWER_STATE :
RPI_FIRMWARE_SET_DOMAIN_STATE,
&packet, sizeof(packet));
if (ret)
dev_err(&domain->dev, "Failed to set %s to %u (%d)\n",
old_interface ? "power" : "domain", on, ret);
else
dev_dbg(&domain->dev, "Set %s to %u\n",
old_interface ? "power" : "domain", on);
return ret;
}
static int rpi_domain_off(struct generic_pm_domain *domain)
{
struct rpi_power_domain *rpi_domain =
container_of(domain, struct rpi_power_domain, base);
return rpi_firmware_set_power(rpi_domain, false);
return rpi_firmware_set_power(domain, false);
}
static int rpi_domain_on(struct generic_pm_domain *domain)
{
struct rpi_power_domain *rpi_domain =
container_of(domain, struct rpi_power_domain, base);
return rpi_firmware_set_power(rpi_domain, true);
return rpi_firmware_set_power(domain, true);
}
static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
@ -85,6 +91,7 @@ static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
dom->fw = rpi_domains->fw;
dom->base.name = name;
dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP;
dom->base.power_on = rpi_domain_on;
dom->base.power_off = rpi_domain_off;
@ -142,13 +149,13 @@ rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains)
int ret;
packet.domain = RPI_POWER_DOMAIN_ARM;
packet.on = ~0;
packet.state = ~0;
ret = rpi_firmware_property(rpi_domains->fw,
RPI_FIRMWARE_GET_DOMAIN_STATE,
&packet, sizeof(packet));
return ret == 0 && packet.on != ~0;
return ret == 0 && packet.state != ~0;
}
static int rpi_power_probe(struct platform_device *pdev)

View File

@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
.unlock = genpd_unlock_spin,
};
static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
__acquires(&genpd->raw_slock)
{
unsigned long flags;
raw_spin_lock_irqsave(&genpd->raw_slock, flags);
genpd->raw_lock_flags = flags;
}
static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
int depth)
__acquires(&genpd->raw_slock)
{
unsigned long flags;
raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
genpd->raw_lock_flags = flags;
}
static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
__acquires(&genpd->raw_slock)
{
unsigned long flags;
raw_spin_lock_irqsave(&genpd->raw_slock, flags);
genpd->raw_lock_flags = flags;
return 0;
}
static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
__releases(&genpd->raw_slock)
{
raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}
static const struct genpd_lock_ops genpd_raw_spin_ops = {
.lock = genpd_lock_raw_spin,
.lock_nested = genpd_lock_nested_raw_spin,
.lock_interruptible = genpd_lock_interruptible_raw_spin,
.unlock = genpd_unlock_raw_spin,
};
#define genpd_lock(p) p->lock_ops->lock(p)
#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
@ -1758,7 +1800,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
genpd_lock(genpd);
genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
if (gd)
@ -1767,6 +1808,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
genpd_unlock(genpd);
dev_pm_domain_set(dev, &genpd->domain);
out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
@ -1823,12 +1865,13 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->gd->max_off_time_changed = true;
genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
dev_pm_domain_set(dev, NULL);
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
@ -2143,7 +2186,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
if (genpd_is_irq_safe(genpd)) {
if (genpd_is_cpu_domain(genpd)) {
raw_spin_lock_init(&genpd->raw_slock);
genpd->lock_ops = &genpd_raw_spin_ops;
} else if (genpd_is_irq_safe(genpd)) {
spin_lock_init(&genpd->slock);
genpd->lock_ops = &genpd_spin_ops;
} else {
@ -3181,7 +3227,16 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
seq_printf(s, "%-25s ", p);
seq_printf(s, "%-26s ", p);
}
static void perf_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
seq_printf(s, "%-10u ", gpd_data->performance_state);
}
static void mode_status_str(struct seq_file *s, struct device *dev)
@ -3190,15 +3245,7 @@ static void mode_status_str(struct seq_file *s, struct device *dev)
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW");
}
static void perf_status_str(struct seq_file *s, struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
seq_put_decimal_ull(s, "", gpd_data->performance_state);
seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
}
static int genpd_summary_one(struct seq_file *s,
@ -3209,7 +3256,6 @@ static int genpd_summary_one(struct seq_file *s,
[GENPD_STATE_OFF] = "off"
};
struct pm_domain_data *pm_data;
const char *kobj_path;
struct gpd_link *link;
char state[16];
int ret;
@ -3226,7 +3272,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@ -3242,17 +3288,10 @@ static int genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj,
genpd_is_irq_safe(genpd) ?
GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
seq_printf(s, "\n %-50s ", kobj_path);
seq_printf(s, "\n %-30s ", dev_name(pm_data->dev));
rtpm_status_str(s, pm_data->dev);
perf_status_str(s, pm_data->dev);
mode_status_str(s, pm_data->dev);
kfree(kobj_path);
}
seq_puts(s, "\n");
@ -3269,7 +3308,7 @@ static int summary_show(struct seq_file *s, void *data)
seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status managed by\n");
seq_puts(s, "------------------------------------------------------------------------------------------------------------\n");
seq_puts(s, "------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
@ -3421,23 +3460,14 @@ static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
const char *kobj_path;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj,
genpd_is_irq_safe(genpd) ?
GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
seq_printf(s, "%s\n", kobj_path);
kfree(kobj_path);
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node)
seq_printf(s, "%s\n", dev_name(pm_data->dev));
genpd_unlock(genpd);
return ret;

View File

@ -455,7 +455,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
} else {
struct imx_pm_domain *domain;
struct platform_device *pd_pdev;
struct device_node *np;
struct clk *ipg_clk;
unsigned int ipg_rate_mhz;
int domain_index;
@ -465,28 +464,24 @@ static int imx_gpc_probe(struct platform_device *pdev)
return PTR_ERR(ipg_clk);
ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000;
for_each_child_of_node(pgc_node, np) {
for_each_child_of_node_scoped(pgc_node, np) {
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
of_node_put(np);
if (ret)
return ret;
}
if (domain_index >= of_id_data->num_domains)
continue;
pd_pdev = platform_device_alloc("imx-pgc-power-domain",
domain_index);
if (!pd_pdev) {
of_node_put(np);
if (!pd_pdev)
return -ENOMEM;
}
ret = platform_device_add_data(pd_pdev,
&imx_gpc_domains[domain_index],
sizeof(imx_gpc_domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
domain = pd_pdev->dev.platform_data;
@ -500,7 +495,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
}

View File

@ -1458,7 +1458,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
.max_register = SZ_4K,
};
struct device *dev = &pdev->dev;
struct device_node *pgc_np, *np;
struct device_node *pgc_np;
struct regmap *regmap;
void __iomem *base;
int ret;
@ -1480,7 +1480,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
return ret;
}
for_each_child_of_node(pgc_np, np) {
for_each_child_of_node_scoped(pgc_np, np) {
struct platform_device *pd_pdev;
struct imx_pgc_domain *domain;
u32 domain_index;
@ -1491,7 +1491,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
dev_err(dev, "Failed to read 'reg' property\n");
of_node_put(np);
return ret;
}
@ -1506,7 +1505,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain_index);
if (!pd_pdev) {
dev_err(dev, "Failed to allocate platform device\n");
of_node_put(np);
return -ENOMEM;
}
@ -1515,7 +1513,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
sizeof(domain_data->domains[domain_index]));
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
@ -1532,7 +1529,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
ret = platform_device_add(pd_pdev);
if (ret) {
platform_device_put(pd_pdev);
of_node_put(np);
return ret;
}
}

View File

@ -28,7 +28,6 @@ struct imx93_power_domain {
void __iomem *addr;
struct clk_bulk_data *clks;
int num_clks;
bool init_off;
};
#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
@ -90,9 +89,6 @@ static void imx93_pd_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
if (!domain->init_off)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
of_genpd_del_provider(np);
pm_genpd_remove(&domain->genpd);
}
@ -102,6 +98,7 @@ static int imx93_pd_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx93_power_domain *domain;
bool init_off;
int ret;
domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
@ -121,18 +118,17 @@ static int imx93_pd_probe(struct platform_device *pdev)
domain->genpd.power_on = imx93_pd_on;
domain->dev = dev;
domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
/* Just to sync the status of hardware */
if (!domain->init_off) {
if (!init_off) {
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
if (ret)
return dev_err_probe(domain->dev, ret,
"failed to enable clocks for domain: %s\n",
domain->genpd.name);
return ret;
}
}
ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off);
ret = pm_genpd_init(&domain->genpd, NULL, init_off);
if (ret)
goto err_clk_unprepare;
@ -148,7 +144,7 @@ err_genpd_remove:
pm_genpd_remove(&domain->genpd);
err_clk_unprepare:
if (!domain->init_off)
if (!init_off)
clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;

View File

@ -398,12 +398,10 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
scpsys->dev->of_node = node;
pd->supply = devm_regulator_get(scpsys->dev, "domain");
scpsys->dev->of_node = root_node;
if (IS_ERR(pd->supply)) {
dev_err_probe(scpsys->dev, PTR_ERR(pd->supply),
if (IS_ERR(pd->supply))
return dev_err_cast_probe(scpsys->dev, pd->supply,
"%pOF: failed to get power supply.\n",
node);
return ERR_CAST(pd->supply);
}
}
pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
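
A brief sketch of the dev_err_probe()/dev_err_cast_probe() pattern used here; the supply name and helper are hypothetical. Both variants log the failure unless the error is -EPROBE_DEFER, and dev_err_cast_probe() hands the error back as an ERR_PTR() so it can be returned straight from a pointer-returning function, as in the hunk above.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical helper returning a pointer, mirroring the conversion above. */
static struct regulator *example_get_supply(struct device *dev)
{
	struct regulator *supply = devm_regulator_get(dev, "domain");

	if (IS_ERR(supply))
		return dev_err_cast_probe(dev, supply,
					  "failed to get power supply\n");

	return supply;
}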

View File

@ -4,6 +4,7 @@
* Copyright (c) 2019, Linaro Limited
*/
#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
@ -747,9 +748,9 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
struct corner *corner, *end;
enum voltage_change_dir dir;
int ret = 0, new_uV;
int ret, new_uV;
mutex_lock(&drv->lock);
guard(mutex)(&drv->lock);
dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
__func__, state, cpr_get_cur_perf_state(drv));
@ -760,10 +761,8 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
*/
corner = drv->corners + state - 1;
end = &drv->corners[drv->num_corners - 1];
if (corner > end || corner < drv->corners) {
ret = -EINVAL;
goto unlock;
}
if (corner > end || corner < drv->corners)
return -EINVAL;
/* Determine direction */
if (drv->corner > corner)
@ -783,7 +782,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
ret = cpr_scale_voltage(drv, corner, new_uV, dir);
if (ret)
goto unlock;
return ret;
if (cpr_is_allowed(drv)) {
cpr_irq_clr(drv);
@ -794,10 +793,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain,
drv->corner = corner;
unlock:
mutex_unlock(&drv->lock);
return ret;
return 0;
}
static int
@ -1040,36 +1036,30 @@ static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
struct device *cpu_dev)
{
u64 rate = 0;
struct device_node *ref_np;
struct device_node *desc_np;
struct device_node *child_np = NULL;
struct device_node *child_req_np = NULL;
struct device_node *ref_np __free(device_node) = NULL;
struct device_node *desc_np __free(device_node) =
dev_pm_opp_of_get_opp_desc_node(cpu_dev);
desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!desc_np)
return 0;
ref_np = dev_pm_opp_get_of_node(ref);
if (!ref_np)
goto out_ref;
return 0;
do {
of_node_put(child_req_np);
child_np = of_get_next_available_child(desc_np, child_np);
child_req_np = of_parse_phandle(child_np, "required-opps", 0);
} while (child_np && child_req_np != ref_np);
for_each_available_child_of_node_scoped(desc_np, child_np) {
struct device_node *child_req_np __free(device_node) =
of_parse_phandle(child_np, "required-opps", 0);
if (child_req_np == ref_np) {
u64 rate;
if (child_np && child_req_np == ref_np)
of_property_read_u64(child_np, "opp-hz", &rate);
of_node_put(child_req_np);
of_node_put(child_np);
of_node_put(ref_np);
out_ref:
of_node_put(desc_np);
return (unsigned long) rate;
}
}
return 0;
}
static int cpr_corner_init(struct cpr_drv *drv)
@ -1443,9 +1433,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
{
struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
const struct acc_desc *acc_desc = drv->acc_desc;
int ret = 0;
int ret;
mutex_lock(&drv->lock);
guard(mutex)(&drv->lock);
dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
@ -1457,7 +1447,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
* additional initialization when further CPUs get attached.
*/
if (drv->attached_cpu_dev)
goto unlock;
return 0;
/*
* cpr_scale_voltage() requires the direction (if we are changing
@ -1469,12 +1459,10 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
* the first time cpr_set_performance_state() is called.
*/
drv->cpu_clk = devm_clk_get(dev, NULL);
if (IS_ERR(drv->cpu_clk)) {
ret = PTR_ERR(drv->cpu_clk);
if (ret != -EPROBE_DEFER)
dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
goto unlock;
}
if (IS_ERR(drv->cpu_clk))
return dev_err_probe(drv->dev, PTR_ERR(drv->cpu_clk),
"could not get cpu clk\n");
drv->attached_cpu_dev = dev;
dev_dbg(drv->dev, "using cpu clk from: %s\n",
@ -1491,42 +1479,39 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
if (ret < 0) {
dev_err(drv->dev, "could not get OPP count\n");
goto unlock;
return ret;
}
drv->num_corners = ret;
if (drv->num_corners < 2) {
dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
ret = -EINVAL;
goto unlock;
return -EINVAL;
}
drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
sizeof(*drv->corners),
GFP_KERNEL);
if (!drv->corners) {
ret = -ENOMEM;
goto unlock;
}
if (!drv->corners)
return -ENOMEM;
ret = cpr_corner_init(drv);
if (ret)
goto unlock;
return ret;
cpr_set_loop_allowed(drv);
ret = cpr_init_parameters(drv);
if (ret)
goto unlock;
return ret;
/* Configure CPR HW but keep it disabled */
ret = cpr_config(drv);
if (ret)
goto unlock;
return ret;
ret = cpr_find_initial_corner(drv);
if (ret)
goto unlock;
return ret;
if (acc_desc->config)
regmap_multi_reg_write(drv->tcsr, acc_desc->config,
@ -1541,10 +1526,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
dev_info(drv->dev, "driver initialized with %u OPPs\n",
drv->num_corners);
unlock:
mutex_unlock(&drv->lock);
return ret;
return 0;
}
static int cpr_debug_info_show(struct seq_file *s, void *unused)

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -775,9 +776,9 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
unsigned int level)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
int ret = 0, i;
int ret, i;
mutex_lock(&rpmhpd_lock);
guard(mutex)(&rpmhpd_lock);
for (i = 0; i < pd->level_count; i++)
if (level <= pd->level[i])
@ -797,14 +798,12 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
goto out;
return ret;
}
pd->corner = i;
out:
mutex_unlock(&rpmhpd_lock);
return ret;
return 0;
}
static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -1024,20 +1025,17 @@ static int rpmpd_power_on(struct generic_pm_domain *domain)
int ret;
struct rpmpd *pd = domain_to_rpmpd(domain);
mutex_lock(&rpmpd_lock);
guard(mutex)(&rpmpd_lock);
ret = rpmpd_send_enable(pd, true);
if (ret)
goto out;
return ret;
pd->enabled = true;
if (pd->corner)
ret = rpmpd_aggregate_corner(pd);
out:
mutex_unlock(&rpmpd_lock);
return ret;
}
@ -1060,27 +1058,21 @@ static int rpmpd_power_off(struct generic_pm_domain *domain)
static int rpmpd_set_performance(struct generic_pm_domain *domain,
unsigned int state)
{
int ret = 0;
struct rpmpd *pd = domain_to_rpmpd(domain);
if (state > pd->max_state)
state = pd->max_state;
mutex_lock(&rpmpd_lock);
guard(mutex)(&rpmpd_lock);
pd->corner = state;
/* Always send updates for vfc and vfl */
if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) &&
pd->key != cpu_to_le32(KEY_FLOOR_LEVEL))
goto out;
return 0;
ret = rpmpd_aggregate_corner(pd);
out:
mutex_unlock(&rpmpd_lock);
return ret;
return rpmpd_aggregate_corner(pd);
}
static int rpmpd_probe(struct platform_device *pdev)
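
For reference, a minimal sketch of the guard() conversion applied to the qcom providers above; the lock and state names are hypothetical. guard(mutex)(&lock) from <linux/cleanup.h> takes the mutex and releases it on every return path, which is what lets the goto-based unlock labels be dropped.

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static unsigned int example_state;

/* Every return below releases example_lock automatically. */
static int example_set_state(unsigned int state, unsigned int max_state)
{
	guard(mutex)(&example_lock);

	if (state > max_state)
		return -EINVAL;

	example_state = state;
	return 0;
}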

View File

@ -33,6 +33,7 @@
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/power/rk3568-power.h>
#include <dt-bindings/power/rockchip,rk3576-power.h>
#include <dt-bindings/power/rk3588-power.h>
struct rockchip_domain_info {
@ -45,6 +46,7 @@ struct rockchip_domain_info {
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
int clk_ungate_mask;
int mem_status_mask;
int repair_status_mask;
u32 pwr_offset;
@ -62,6 +64,7 @@ struct rockchip_pmu_info {
u32 chain_status_offset;
u32 mem_status_offset;
u32 repair_status_offset;
u32 clk_ungate_offset;
u32 core_pwrcnt_offset;
u32 gpu_pwrcnt_offset;
@ -144,6 +147,25 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
#define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \
{ \
.name = _name, \
.pwr_offset = p_offset, \
.pwr_w_mask = (pwr) << 16, \
.pwr_mask = (pwr), \
.status_mask = (status), \
.mem_offset = m_offset, \
.mem_status_mask = (m_status), \
.repair_status_mask = (r_status), \
.req_offset = r_offset, \
.req_w_mask = (req) << 16, \
.req_mask = (req), \
.idle_mask = (idle), \
.clk_ungate_mask = (g_mask), \
.ack_mask = (ack), \
.active_wakeup = wakeup, \
}
#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
.name = _name, \
@ -175,6 +197,9 @@ struct rockchip_pmu {
#define DOMAIN_RK3568(name, pwr, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
#define DOMAIN_RK3576(name, p_offset, pwr, status, r_status, r_offset, req, idle, g_mask, wakeup) \
DOMAIN_M_O_R_G(name, p_offset, pwr, status, 0, r_status, r_status, r_offset, req, idle, idle, g_mask, wakeup)
/*
* Dynamic Memory Controller may need to coordinate with us -- see
* rockchip_pmu_block().
@ -299,6 +324,26 @@ static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
return val;
}
static int rockchip_pmu_ungate_clk(struct rockchip_pm_domain *pd, bool ungate)
{
const struct rockchip_domain_info *pd_info = pd->info;
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
int clk_ungate_w_mask = pd_info->clk_ungate_mask << 16;
if (!pd_info->clk_ungate_mask)
return 0;
if (!pmu->info->clk_ungate_offset)
return 0;
val = ungate ? (pd_info->clk_ungate_mask | clk_ungate_w_mask) :
clk_ungate_w_mask;
regmap_write(pmu->regmap, pmu->info->clk_ungate_offset, val);
return 0;
}
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
@ -539,6 +584,8 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
return ret;
}
rockchip_pmu_ungate_clk(pd, true);
if (!power_on) {
rockchip_pmu_save_qos(pd);
@ -555,6 +602,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
rockchip_pmu_restore_qos(pd);
}
rockchip_pmu_ungate_clk(pd, false);
clk_bulk_disable(pd->num_clks, pd->clks);
}
@ -712,12 +760,11 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
goto err_unprepare_clocks;
}
pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
of_node_put(qos_node);
if (IS_ERR(pd->qos_regmap[j])) {
error = -ENODEV;
of_node_put(qos_node);
goto err_unprepare_clocks;
}
of_node_put(qos_node);
}
}
@ -800,11 +847,10 @@ static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
struct device_node *parent)
{
struct device_node *np;
struct generic_pm_domain *child_domain, *parent_domain;
int error;
for_each_child_of_node(parent, np) {
for_each_child_of_node_scoped(parent, np) {
u32 idx;
error = of_property_read_u32(parent, "reg", &idx);
@ -812,7 +858,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
parent, error);
goto err_out;
return error;
}
parent_domain = pmu->genpd_data.domains[idx];
@ -820,7 +866,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
if (error) {
dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
np, error);
goto err_out;
return error;
}
error = of_property_read_u32(np, "reg", &idx);
@ -828,7 +874,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
dev_err(pmu->dev,
"%pOFn: failed to retrieve domain id (reg): %d\n",
np, error);
goto err_out;
return error;
}
child_domain = pmu->genpd_data.domains[idx];
@ -836,7 +882,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
if (error) {
dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
parent_domain->name, child_domain->name, error);
goto err_out;
return error;
} else {
dev_dbg(pmu->dev, "%s add subdomain: %s\n",
parent_domain->name, child_domain->name);
@ -846,17 +892,12 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
}
return 0;
err_out:
of_node_put(np);
return error;
}
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *node;
struct device *parent;
struct rockchip_pmu *pmu;
const struct rockchip_pmu_info *pmu_info;
@ -912,14 +953,13 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Prevent any rockchip_pmu_block() from racing with the remainder of
* setup (clocks, register initialization).
*/
mutex_lock(&dmc_pmu_mutex);
guard(mutex)(&dmc_pmu_mutex);
for_each_available_child_of_node(np, node) {
for_each_available_child_of_node_scoped(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
dev_err(dev, "failed to handle node %pOFn: %d\n",
node, error);
of_node_put(node);
goto err_out;
}
@ -927,7 +967,6 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (error < 0) {
dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
node, error);
of_node_put(node);
goto err_out;
}
}
@ -947,13 +986,10 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (!WARN_ON_ONCE(dmc_pmu))
dmc_pmu = pmu;
mutex_unlock(&dmc_pmu_mutex);
return 0;
err_out:
rockchip_pm_domain_cleanup(pmu);
mutex_unlock(&dmc_pmu_mutex);
return error;
}
@ -1106,6 +1142,28 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
static const struct rockchip_domain_info rk3576_pm_domains[] = {
[RK3576_PD_NPU] = DOMAIN_RK3576("npu", 0x0, BIT(0), BIT(0), 0, 0x0, 0, 0, 0, false),
[RK3576_PD_NVM] = DOMAIN_RK3576("nvm", 0x0, BIT(6), 0, BIT(6), 0x4, BIT(2), BIT(18), BIT(2), false),
[RK3576_PD_SDGMAC] = DOMAIN_RK3576("sdgmac", 0x0, BIT(7), 0, BIT(7), 0x4, BIT(1), BIT(17), 0x6, false),
[RK3576_PD_AUDIO] = DOMAIN_RK3576("audio", 0x0, BIT(8), 0, BIT(8), 0x4, BIT(0), BIT(16), BIT(0), false),
[RK3576_PD_PHP] = DOMAIN_RK3576("php", 0x0, BIT(9), 0, BIT(9), 0x0, BIT(15), BIT(15), BIT(15), false),
[RK3576_PD_SUBPHP] = DOMAIN_RK3576("subphp", 0x0, BIT(10), 0, BIT(10), 0x0, 0, 0, 0, false),
[RK3576_PD_VOP] = DOMAIN_RK3576("vop", 0x0, BIT(11), 0, BIT(11), 0x0, 0x6000, 0x6000, 0x6000, false),
[RK3576_PD_VO1] = DOMAIN_RK3576("vo1", 0x0, BIT(14), 0, BIT(14), 0x0, BIT(12), BIT(12), 0x7000, false),
[RK3576_PD_VO0] = DOMAIN_RK3576("vo0", 0x0, BIT(15), 0, BIT(15), 0x0, BIT(11), BIT(11), 0x6800, false),
[RK3576_PD_USB] = DOMAIN_RK3576("usb", 0x4, BIT(0), 0, BIT(16), 0x0, BIT(10), BIT(10), 0x6400, true),
[RK3576_PD_VI] = DOMAIN_RK3576("vi", 0x4, BIT(1), 0, BIT(17), 0x0, BIT(9), BIT(9), BIT(9), false),
[RK3576_PD_VEPU0] = DOMAIN_RK3576("vepu0", 0x4, BIT(2), 0, BIT(18), 0x0, BIT(7), BIT(7), 0x280, false),
[RK3576_PD_VEPU1] = DOMAIN_RK3576("vepu1", 0x4, BIT(3), 0, BIT(19), 0x0, BIT(8), BIT(8), BIT(8), false),
[RK3576_PD_VDEC] = DOMAIN_RK3576("vdec", 0x4, BIT(4), 0, BIT(20), 0x0, BIT(6), BIT(6), BIT(6), false),
[RK3576_PD_VPU] = DOMAIN_RK3576("vpu", 0x4, BIT(5), 0, BIT(21), 0x0, BIT(5), BIT(5), BIT(5), false),
[RK3576_PD_NPUTOP] = DOMAIN_RK3576("nputop", 0x4, BIT(6), 0, BIT(22), 0x0, 0x18, 0x18, 0x18, false),
[RK3576_PD_NPU0] = DOMAIN_RK3576("npu0", 0x4, BIT(7), 0, BIT(23), 0x0, BIT(1), BIT(1), 0x1a, false),
[RK3576_PD_NPU1] = DOMAIN_RK3576("npu1", 0x4, BIT(8), 0, BIT(24), 0x0, BIT(2), BIT(2), 0x1c, false),
[RK3576_PD_GPU] = DOMAIN_RK3576("gpu", 0x4, BIT(9), 0, BIT(25), 0x0, BIT(0), BIT(0), BIT(0), false),
};
static const struct rockchip_domain_info rk3588_pm_domains[] = {
[RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
[RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
@ -1284,6 +1342,22 @@ static const struct rockchip_pmu_info rk3568_pmu = {
.domain_info = rk3568_pm_domains,
};
static const struct rockchip_pmu_info rk3576_pmu = {
.pwr_offset = 0x210,
.status_offset = 0x230,
.chain_status_offset = 0x248,
.mem_status_offset = 0x250,
.mem_pwr_offset = 0x300,
.req_offset = 0x110,
.idle_offset = 0x128,
.ack_offset = 0x120,
.repair_status_offset = 0x570,
.clk_ungate_offset = 0x140,
.num_domains = ARRAY_SIZE(rk3576_pm_domains),
.domain_info = rk3576_pm_domains,
};
static const struct rockchip_pmu_info rk3588_pmu = {
.pwr_offset = 0x14c,
.status_offset = 0x180,
@ -1359,6 +1433,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.compatible = "rockchip,rk3568-power-controller",
.data = (void *)&rk3568_pmu,
},
{
.compatible = "rockchip,rk3576-power-controller",
.data = (void *)&rk3576_pmu,
},
{
.compatible = "rockchip,rk3588-power-controller",
.data = (void *)&rk3588_pmu,

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
#ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__
#define __DT_BINDINGS_POWER_RK3576_POWER_H__
/* VD_NPU */
#define RK3576_PD_NPU 0
#define RK3576_PD_NPUTOP 1
#define RK3576_PD_NPU0 2
#define RK3576_PD_NPU1 3
/* VD_GPU */
#define RK3576_PD_GPU 4
/* VD_LOGIC */
#define RK3576_PD_NVM 5
#define RK3576_PD_SDGMAC 6
#define RK3576_PD_USB 7
#define RK3576_PD_PHP 8
#define RK3576_PD_SUBPHP 9
#define RK3576_PD_AUDIO 10
#define RK3576_PD_VEPU0 11
#define RK3576_PD_VEPU1 12
#define RK3576_PD_VPU 13
#define RK3576_PD_VDEC 14
#define RK3576_PD_VI 15
#define RK3576_PD_VO0 16
#define RK3576_PD_VO1 17
#define RK3576_PD_VOP 18
#endif

View File

@ -198,8 +198,11 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
struct {
raw_spinlock_t raw_slock;
unsigned long raw_lock_flags;
};
};
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@ -473,6 +476,9 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
int dev_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list);
int devm_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
@ -499,6 +505,14 @@ static inline int dev_pm_domain_attach_list(struct device *dev,
{
return 0;
}
static inline int devm_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list)
{
return 0;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)