Merge branch 'depends/clk' into next/drivers

This is a snapshot of the stable clk branch at

git://git.linaro.org/people/mturquette/linux.git clk-for-3.10

which is a dependency for the tegra clock changes.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Arnd Bergmann 2013-04-09 17:09:35 +02:00
commit 5790c58b13
25 changed files with 1620 additions and 128 deletions


@ -0,0 +1,56 @@
Frequently asked questions about the sunxi clock system
=======================================================
This document contains useful bits of information that people tend to ask
about the sunxi clock system, as well as accompanying ASCII art when adequate.
Q: Why is the main 24MHz oscillator gatable? Wouldn't that break the
system?
A: The 24MHz oscillator allows gating to save power. Indeed, if gated
carelessly the system would stop functioning, but with the right
steps, one can gate it and keep the system running. Consider this
simplified suspend example:
While the system is operational, you would see something like

    24MHz         32kHz
     |
    PLL1
      \
       \_ CPU Mux
             |
           [CPU]
When you are about to suspend, you switch the CPU Mux to the 32kHz
oscillator:

    24MHz         32kHz
     |              |
    PLL1            |
                   /
          CPU Mux _/
             |
           [CPU]
Finally you can gate the main oscillator

                  32kHz
                    |
                    |
                   /
          CPU Mux _/
             |
           [CPU]
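
A minimal sketch of that sequence from kernel code, using the common clock
framework consumer API; the clock names assume the clkdev lookups the sunxi
clock driver registers for the example device tree nodes, and error handling
is omitted:

	struct clk *cpu = clk_get(NULL, "cpu");
	struct clk *osc32k = clk_get(NULL, "osc32k");
	struct clk *osc24M = clk_get(NULL, "osc24M");

	/* 1) reparent the CPU mux to the always-running 32kHz oscillator */
	clk_set_parent(cpu, osc32k);

	/* 2) drop the last reference so the 24MHz oscillator gets gated
	 *    (assumes a matching clk_prepare_enable() was done earlier) */
	clk_disable_unprepare(osc24M);

	/* on resume, do the reverse */
	clk_prepare_enable(osc24M);
	clk_set_parent(cpu, osc24M);
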
Q: Where can I learn more about the sunxi clocks?
A: The linux-sunxi wiki contains a page documenting the clock registers,
you can find it at
http://linux-sunxi.org/A10/CCM
The authoritative source for information at this time is the ccmu driver
released by Allwinner, you can find it at
https://github.com/linux-sunxi/linux-sunxi/tree/sunxi-3.0/arch/arm/mach-sun4i/clock/ccmu


@ -174,9 +174,9 @@ int clk_foo_enable(struct clk_hw *hw)
};
Below is a matrix detailing which clk_ops are mandatory based upon the
hardware capbilities of that clock. A cell marked as "y" means
hardware capabilities of that clock. A cell marked as "y" means
mandatory, a cell marked as "n" implies that either including that
callback is invalid or otherwise uneccesary. Empty cells are either
callback is invalid or otherwise unnecessary. Empty cells are either
optional or must be evaluated on a case-by-case basis.
clock hardware characteristics
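
As an illustration of that point (a sketch, not an entry from the matrix
itself): a clock that can only gate, with no rate or parent control, needs
only the enable family of callbacks. clk_foo_disable and clk_foo_is_enabled
are hypothetical counterparts to the clk_foo_enable shown above:

	static const struct clk_ops clk_foo_gate_only_ops = {
		.enable		= clk_foo_enable,
		.disable	= clk_foo_disable,
		.is_enabled	= clk_foo_is_enabled,
	};
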


@ -0,0 +1,22 @@
Binding for the axi-clkgen clock generator
This binding uses the common clock binding[1].
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
Required properties:
- compatible : shall be "adi,axi-clkgen".
- #clock-cells : from common clock binding; Should always be set to 0.
- reg : Address and length of the axi-clkgen register set.
- clocks : Phandle and clock specifier for the parent clock.
Optional properties:
- clock-output-names : From common clock binding.
Example:
clock@0xff000000 {
compatible = "adi,axi-clkgen";
#clock-cells = <0>;
reg = <0xff000000 0x1000>;
clocks = <&osc 1>;
};


@ -0,0 +1,44 @@
Device Tree Clock bindings for arch-sunxi
This binding uses the common clock binding[1].
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
Required properties:
- compatible : shall be one of the following:
"allwinner,sun4i-osc-clk" - for a gatable oscillator
"allwinner,sun4i-pll1-clk" - for the main PLL clock
"allwinner,sun4i-cpu-clk" - for the CPU multiplexer clock
"allwinner,sun4i-axi-clk" - for the AXI clock
"allwinner,sun4i-ahb-clk" - for the AHB clock
"allwinner,sun4i-apb0-clk" - for the APB0 clock
"allwinner,sun4i-apb1-clk" - for the APB1 clock
"allwinner,sun4i-apb1-mux-clk" - for the APB1 clock muxing
Required properties for all clocks:
- reg : shall be the control register address for the clock.
- clocks : shall be the input parent clock(s) phandle for the clock
- #clock-cells : from common clock binding; shall be set to 0.
For example:
osc24M: osc24M@01c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-osc-clk";
reg = <0x01c20050 0x4>;
clocks = <&osc24M_fixed>;
};
pll1: pll1@01c20000 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-pll1-clk";
reg = <0x01c20000 0x4>;
clocks = <&osc24M>;
};
cpu: cpu@01c20054 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-cpu-clk";
reg = <0x01c20054 0x4>;
clocks = <&osc32k>, <&osc24M>, <&pll1>;
};


@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
busy->mux.reg = reg;
busy->mux.shift = shift;
busy->mux.width = width;
busy->mux.mask = BIT(width) - 1;
busy->mux.lock = &imx_ccm_lock;
busy->mux_ops = &clk_mux_ops;


@ -63,6 +63,14 @@ config CLK_TWL6040
McPDM. McPDM module is using the external bit clock on the McPDM bus
as functional clock.
config COMMON_CLK_AXI_CLKGEN
tristate "AXI clkgen driver"
depends on ARCH_ZYNQ || MICROBLAZE
help
Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
FPGAs. It is commonly used in Analog Devices' reference designs.
endmenu
source "drivers/clk/mvebu/Kconfig"


@ -7,6 +7,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
obj-$(CONFIG_COMMON_CLK) += clk-composite.o
# SoCs specific
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
@ -23,6 +24,7 @@ ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
@ -31,6 +33,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_X86) += x86/
# Chip specific
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o


@ -0,0 +1,331 @@
/*
* AXI clkgen driver
*
* Copyright 2012-2013 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Licensed under the GPL-2.
*
*/
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/err.h>
#define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04
#define AXI_CLKGEN_REG_CLK_OUT1 0x08
#define AXI_CLKGEN_REG_CLK_OUT2 0x0c
#define AXI_CLKGEN_REG_CLK_DIV 0x10
#define AXI_CLKGEN_REG_CLK_FB1 0x14
#define AXI_CLKGEN_REG_CLK_FB2 0x18
#define AXI_CLKGEN_REG_LOCK1 0x1c
#define AXI_CLKGEN_REG_LOCK2 0x20
#define AXI_CLKGEN_REG_LOCK3 0x24
#define AXI_CLKGEN_REG_FILTER1 0x28
#define AXI_CLKGEN_REG_FILTER2 0x2c
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
};
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
{
switch (m) {
case 0:
return 0x01001990;
case 1:
return 0x01001190;
case 2:
return 0x01009890;
case 3:
return 0x01001890;
case 4:
return 0x01008890;
case 5 ... 8:
return 0x01009090;
case 9 ... 11:
return 0x01000890;
case 12:
return 0x08009090;
case 13 ... 22:
return 0x01001090;
case 23 ... 36:
return 0x01008090;
case 37 ... 46:
return 0x08001090;
default:
return 0x08008090;
}
}
static const uint32_t axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
0x1f1f02ee, 0x1f1f02bc, 0x1f1f028a, 0x1f1f0271,
0x1f1f023f, 0x1f1f0226, 0x1f1f020d, 0x1f1f01f4,
0x1f1f01db, 0x1f1f01c2, 0x1f1f01a9, 0x1f1f0190,
0x1f1f0190, 0x1f1f0177, 0x1f1f015e, 0x1f1f015e,
0x1f1f0145, 0x1f1f0145, 0x1f1f012c, 0x1f1f012c,
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
static uint32_t axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
return 0x1f1f00fa;
}
static const unsigned int fpfd_min = 10000;
static const unsigned int fpfd_max = 300000;
static const unsigned int fvco_min = 600000;
static const unsigned int fvco_max = 1200000;
static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
unsigned long f, dout, best_f, fvco;
fin /= 1000;
fout /= 1000;
best_f = ULONG_MAX;
*best_d = 0;
*best_m = 0;
*best_dout = 0;
d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
d_max = min_t(unsigned long, fin / fpfd_min, 80);
m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min, fin) * d_min, 1);
m_max = min_t(unsigned long, fvco_max * d_max / fin, 64);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max));
_d_max = min(d_max, fin * m / fvco_min);
for (d = _d_min; d <= _d_max; d++) {
fvco = fin * m / d;
dout = DIV_ROUND_CLOSEST(fvco, fout);
dout = clamp_t(unsigned long, dout, 1, 128);
f = fvco / dout;
if (abs(f - fout) < abs(best_f - fout)) {
best_f = f;
*best_d = d;
*best_m = m;
*best_dout = dout;
if (best_f == fout)
return;
}
}
}
}
static void axi_clkgen_calc_clk_params(unsigned int divider, unsigned int *low,
unsigned int *high, unsigned int *edge, unsigned int *nocount)
{
if (divider == 1)
*nocount = 1;
else
*nocount = 0;
*high = divider / 2;
*edge = divider % 2;
*low = divider - *high;
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
{
return container_of(clk_hw, struct axi_clkgen, clk_hw);
}
static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
unsigned long rate, unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
unsigned int nocount;
unsigned int high;
unsigned int edge;
unsigned int low;
uint32_t filter;
uint32_t lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
filter = axi_clkgen_lookup_filter(m - 1);
lock = axi_clkgen_lookup_lock(m - 1);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0);
axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1,
(high << 6) | low);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2,
(edge << 7) | (nocount << 6));
axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV,
(edge << 13) | (nocount << 12) | (high << 6) | low);
axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1,
(high << 6) | low);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2,
(edge << 7) | (nocount << 6));
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2,
(((lock >> 16) & 0x1f) << 10) | 0x1);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3,
(((lock >> 24) & 0x1f) << 10) | 0x3e9);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter);
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1);
return 0;
}
static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned int d, m, dout;
axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
return *parent_rate / d * m / dout;
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
unsigned int reg;
unsigned long long tmp;
axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, &reg);
dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, &reg);
d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, &reg);
m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
if (d == 0 || dout == 0)
return 0;
tmp = (unsigned long long)(parent_rate / d) * m;
do_div(tmp, dout);
if (tmp > ULONG_MAX)
return ULONG_MAX;
return tmp;
}
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.round_rate = axi_clkgen_round_rate,
.set_rate = axi_clkgen_set_rate,
};
static int axi_clkgen_probe(struct platform_device *pdev)
{
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
const char *parent_name;
const char *clk_name;
struct resource *mem;
struct clk *clk;
axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
if (!axi_clkgen)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
if (!parent_name)
return -EINVAL;
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
axi_clkgen->clk_hw.init = &init;
clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
if (IS_ERR(clk))
return PTR_ERR(clk);
return of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
clk);
}
static int axi_clkgen_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
return 0;
}
static const struct of_device_id axi_clkgen_ids[] = {
{ .compatible = "adi,axi-clkgen-1.00.a" },
{ },
};
MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
static struct platform_driver axi_clkgen_driver = {
.driver = {
.name = "adi-axi-clkgen",
.owner = THIS_MODULE,
.of_match_table = axi_clkgen_ids,
},
.probe = axi_clkgen_probe,
.remove = axi_clkgen_remove,
};
module_platform_driver(axi_clkgen_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Driver for the Analog Devices' AXI clkgen pcore clock generator");

drivers/clk/clk-composite.c (new file)

@ -0,0 +1,201 @@
/*
* Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
static u8 clk_composite_get_parent(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *mux_hw = composite->mux_hw;
mux_hw->clk = hw->clk;
return mux_ops->get_parent(mux_hw);
}
static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *mux_hw = composite->mux_hw;
mux_hw->clk = hw->clk;
return mux_ops->set_parent(mux_hw, index);
}
static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *div_ops = composite->div_ops;
struct clk_hw *div_hw = composite->div_hw;
div_hw->clk = hw->clk;
return div_ops->recalc_rate(div_hw, parent_rate);
}
static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *div_ops = composite->div_ops;
struct clk_hw *div_hw = composite->div_hw;
div_hw->clk = hw->clk;
return div_ops->round_rate(div_hw, rate, prate);
}
static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *div_ops = composite->div_ops;
struct clk_hw *div_hw = composite->div_hw;
div_hw->clk = hw->clk;
return div_ops->set_rate(div_hw, rate, parent_rate);
}
static int clk_composite_is_enabled(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
gate_hw->clk = hw->clk;
return gate_ops->is_enabled(gate_hw);
}
static int clk_composite_enable(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
gate_hw->clk = hw->clk;
return gate_ops->enable(gate_hw);
}
static void clk_composite_disable(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *gate_ops = composite->gate_ops;
struct clk_hw *gate_hw = composite->gate_hw;
gate_hw->clk = hw->clk;
gate_ops->disable(gate_hw);
}
struct clk *clk_register_composite(struct device *dev, const char *name,
const char **parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *div_hw, const struct clk_ops *div_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
struct clk *clk;
struct clk_init_data init;
struct clk_composite *composite;
struct clk_ops *clk_composite_ops;
composite = kzalloc(sizeof(*composite), GFP_KERNEL);
if (!composite) {
pr_err("%s: could not allocate composite clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
init.name = name;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
clk_composite_ops = &composite->ops;
if (mux_hw && mux_ops) {
if (!mux_ops->get_parent || !mux_ops->set_parent) {
clk = ERR_PTR(-EINVAL);
goto err;
}
composite->mux_hw = mux_hw;
composite->mux_ops = mux_ops;
clk_composite_ops->get_parent = clk_composite_get_parent;
clk_composite_ops->set_parent = clk_composite_set_parent;
}
if (div_hw && div_ops) {
if (!div_ops->recalc_rate || !div_ops->round_rate ||
!div_ops->set_rate) {
clk = ERR_PTR(-EINVAL);
goto err;
}
composite->div_hw = div_hw;
composite->div_ops = div_ops;
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
clk_composite_ops->round_rate = clk_composite_round_rate;
clk_composite_ops->set_rate = clk_composite_set_rate;
}
if (gate_hw && gate_ops) {
if (!gate_ops->is_enabled || !gate_ops->enable ||
!gate_ops->disable) {
clk = ERR_PTR(-EINVAL);
goto err;
}
composite->gate_hw = gate_hw;
composite->gate_ops = gate_ops;
clk_composite_ops->is_enabled = clk_composite_is_enabled;
clk_composite_ops->enable = clk_composite_enable;
clk_composite_ops->disable = clk_composite_disable;
}
init.ops = clk_composite_ops;
composite->hw.init = &init;
clk = clk_register(dev, &composite->hw);
if (IS_ERR(clk))
goto err;
if (composite->mux_hw)
composite->mux_hw->clk = clk;
if (composite->div_hw)
composite->div_hw->clk = clk;
if (composite->gate_hw)
composite->gate_hw->clk = clk;
return clk;
err:
kfree(composite);
return clk;
}
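
As a usage sketch (not code from this series), a platform clock driver could
combine the existing basic mux, divider and gate types into a single clock
with the new helper; all names, register offsets and bit positions below are
made up for illustration:

	static struct clk * __init foo_periph_clk_register(void __iomem *base)
	{
		static const char *parents[] = { "pll_p", "pll_c", "osc" };
		static struct clk_mux mux = { .mask = 0x3, .shift = 30 };
		static struct clk_divider div = { .shift = 0, .width = 8 };
		static struct clk_gate gate = { .bit_idx = 28 };

		mux.reg = base + 0x100;		/* assumed mux + divider register */
		div.reg = base + 0x100;
		gate.reg = base + 0x104;	/* assumed enable register */

		return clk_register_composite(NULL, "foo_periph", parents,
				ARRAY_SIZE(parents),
				&mux.hw, &clk_mux_ops,
				&div.hw, &clk_divider_ops,
				&gate.hw, &clk_gate_ops,
				0);
	}
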


@ -32,6 +32,7 @@
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
int num_parents = __clk_get_num_parents(hw->clk);
u32 val;
/*
@ -42,7 +43,16 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
* val = 0x4 really means "bit 2, index starts at bit 0"
*/
val = readl(mux->reg) >> mux->shift;
val &= (1 << mux->width) - 1;
val &= mux->mask;
if (mux->table) {
int i;
for (i = 0; i < num_parents; i++)
if (mux->table[i] == val)
return i;
return -EINVAL;
}
if (val && (mux->flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
@ -50,7 +60,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
if (val && (mux->flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= __clk_get_num_parents(hw->clk))
if (val >= num_parents)
return -EINVAL;
return val;
@ -62,17 +72,22 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
u32 val;
unsigned long flags = 0;
if (mux->flags & CLK_MUX_INDEX_BIT)
index = (1 << ffs(index));
if (mux->table)
index = mux->table[index];
if (mux->flags & CLK_MUX_INDEX_ONE)
index++;
else {
if (mux->flags & CLK_MUX_INDEX_BIT)
index = (1 << ffs(index));
if (mux->flags & CLK_MUX_INDEX_ONE)
index++;
}
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
val = readl(mux->reg);
val &= ~(((1 << mux->width) - 1) << mux->shift);
val &= ~(mux->mask << mux->shift);
val |= index << mux->shift;
writel(val, mux->reg);
@ -88,10 +103,10 @@ const struct clk_ops clk_mux_ops = {
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
struct clk *clk_register_mux(struct device *dev, const char *name,
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock)
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk *clk;
@ -113,9 +128,10 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
mux->width = width;
mux->mask = mask;
mux->flags = clk_mux_flags;
mux->lock = lock;
mux->table = table;
mux->hw.init = &init;
clk = clk_register(dev, &mux->hw);
@ -125,3 +141,15 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
return clk;
}
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock)
{
u32 mask = BIT(width) - 1;
return clk_register_mux_table(dev, name, parent_names, num_parents,
flags, reg, shift, mask, clk_mux_flags,
NULL, lock);
}
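
A short usage sketch of the new table variant, for hardware where the
register values do not map 1:1 onto parent indices; the names, values, base
and lock below are made up:

	/* parents 0/1/2 are selected by writing 0, 2 and 5 to the field */
	static u32 foo_mux_table[] = { 0, 2, 5 };
	static const char *foo_mux_parents[] = { "osc", "pll_a", "pll_b" };

	clk = clk_register_mux_table(NULL, "foo_mux", foo_mux_parents,
			ARRAY_SIZE(foo_mux_parents), 0,
			base + 0x20, 4, 0x7,	/* reg, shift, mask */
			0, foo_mux_table, &foo_lock);
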


@ -1113,7 +1113,7 @@ void __init sirfsoc_of_clk_init(void)
for (i = pll1; i < maxclk; i++) {
prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
BUG_ON(!prima2_clks[i]);
BUG_ON(IS_ERR(prima2_clks[i]));
}
clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
clk_register_clkdev(prima2_clks[io], NULL, "io");


@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/clk/zynq.h>
static void __iomem *slcr_base;


@ -19,14 +19,77 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
/*** locking ***/
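/*
 * Note: both locks are taken through helpers that let the same task
 * re-acquire them.  If the lock is already held by current, only a refcount
 * is bumped, so clock ops may call back into the framework (for example
 * from a ->set_rate implementation) without deadlocking.
 */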
static void clk_prepare_lock(void)
{
if (!mutex_trylock(&prepare_lock)) {
if (prepare_owner == current) {
prepare_refcnt++;
return;
}
mutex_lock(&prepare_lock);
}
WARN_ON_ONCE(prepare_owner != NULL);
WARN_ON_ONCE(prepare_refcnt != 0);
prepare_owner = current;
prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
WARN_ON_ONCE(prepare_owner != current);
WARN_ON_ONCE(prepare_refcnt == 0);
if (--prepare_refcnt)
return;
prepare_owner = NULL;
mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
unsigned long flags;
if (!spin_trylock_irqsave(&enable_lock, flags)) {
if (enable_owner == current) {
enable_refcnt++;
return flags;
}
spin_lock_irqsave(&enable_lock, flags);
}
WARN_ON_ONCE(enable_owner != NULL);
WARN_ON_ONCE(enable_refcnt != 0);
enable_owner = current;
enable_refcnt = 1;
return flags;
}
static void clk_enable_unlock(unsigned long flags)
{
WARN_ON_ONCE(enable_owner != current);
WARN_ON_ONCE(enable_refcnt == 0);
if (--enable_refcnt)
return;
enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, flags);
}
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
@ -69,7 +132,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
mutex_lock(&prepare_lock);
clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@ -77,7 +140,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return 0;
}
@ -130,7 +193,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_printf(s, "{");
mutex_lock(&prepare_lock);
clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@ -144,7 +207,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
seq_printf(s, "}");
return 0;
@ -316,7 +379,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
mutex_lock(&prepare_lock);
clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@ -326,7 +389,7 @@ static int __init clk_debug_init(void)
inited = 1;
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return 0;
}
@ -335,6 +398,31 @@ late_initcall(clk_debug_init);
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif
/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
struct clk *child;
if (!clk)
return;
hlist_for_each_entry(child, &clk->children, child_node)
clk_unprepare_unused_subtree(child);
if (clk->prepare_count)
return;
if (clk->flags & CLK_IGNORE_UNUSED)
return;
if (__clk_is_prepared(clk)) {
if (clk->ops->unprepare_unused)
clk->ops->unprepare_unused(clk->hw);
else if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
}
}
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
@ -347,7 +435,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
spin_lock_irqsave(&enable_lock, flags);
flags = clk_enable_lock();
if (clk->enable_count)
goto unlock_out;
@ -368,7 +456,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
unlock_out:
spin_unlock_irqrestore(&enable_lock, flags);
clk_enable_unlock(flags);
out:
return;
@ -378,7 +466,7 @@ static int clk_disable_unused(void)
{
struct clk *clk;
mutex_lock(&prepare_lock);
clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@ -386,7 +474,13 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
mutex_unlock(&prepare_lock);
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_unprepare_unused_subtree(clk);
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
clk_prepare_unlock();
return 0;
}
@ -451,6 +545,27 @@ unsigned long __clk_get_flags(struct clk *clk)
return !clk ? 0 : clk->flags;
}
bool __clk_is_prepared(struct clk *clk)
{
int ret;
if (!clk)
return false;
/*
* .is_prepared is optional for clocks that can prepare
* fall back to software usage counter if it is missing
*/
if (!clk->ops->is_prepared) {
ret = clk->prepare_count ? 1 : 0;
goto out;
}
ret = clk->ops->is_prepared(clk->hw);
out:
return !!ret;
}
bool __clk_is_enabled(struct clk *clk)
{
int ret;
@ -548,9 +663,9 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
mutex_lock(&prepare_lock);
clk_prepare_lock();
__clk_unprepare(clk);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@ -596,9 +711,9 @@ int clk_prepare(struct clk *clk)
{
int ret;
mutex_lock(&prepare_lock);
clk_prepare_lock();
ret = __clk_prepare(clk);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -640,9 +755,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&enable_lock, flags);
flags = clk_enable_lock();
__clk_disable(clk);
spin_unlock_irqrestore(&enable_lock, flags);
clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@ -693,9 +808,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
spin_lock_irqsave(&enable_lock, flags);
flags = clk_enable_lock();
ret = __clk_enable(clk);
spin_unlock_irqrestore(&enable_lock, flags);
clk_enable_unlock(flags);
return ret;
}
@ -740,9 +855,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
mutex_lock(&prepare_lock);
clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -837,13 +952,13 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
mutex_lock(&prepare_lock);
clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return rate;
}
@ -974,7 +1089,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
return 0;
return NULL;
if (clk->notifier_count) {
ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
@ -1048,7 +1163,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
int ret = 0;
/* prevent racing with updates to the clock topology */
mutex_lock(&prepare_lock);
clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk->rate)
@ -1080,7 +1195,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
clk_change_rate(top);
out:
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -1096,9 +1211,9 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
mutex_lock(&prepare_lock);
clk_prepare_lock();
parent = __clk_get_parent(clk);
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return parent;
}
@ -1242,19 +1357,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
__clk_prepare(parent);
/* FIXME replace with clk_is_enabled(clk) someday */
spin_lock_irqsave(&enable_lock, flags);
flags = clk_enable_lock();
if (clk->enable_count)
__clk_enable(parent);
spin_unlock_irqrestore(&enable_lock, flags);
clk_enable_unlock(flags);
/* change clock input source */
ret = clk->ops->set_parent(clk->hw, i);
/* clean up old prepare and enable */
spin_lock_irqsave(&enable_lock, flags);
flags = clk_enable_lock();
if (clk->enable_count)
__clk_disable(old_parent);
spin_unlock_irqrestore(&enable_lock, flags);
clk_enable_unlock(flags);
if (clk->prepare_count)
__clk_unprepare(old_parent);
@ -1286,7 +1401,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return -ENOSYS;
/* prevent racing with updates to the clock topology */
mutex_lock(&prepare_lock);
clk_prepare_lock();
if (clk->parent == parent)
goto out;
@ -1315,7 +1430,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
__clk_reparent(clk, parent);
out:
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -1338,7 +1453,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (!clk)
return -EINVAL;
mutex_lock(&prepare_lock);
clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
@ -1462,7 +1577,7 @@ int __clk_init(struct device *dev, struct clk *clk)
clk_debug_register(clk);
out:
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -1696,7 +1811,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
mutex_lock(&prepare_lock);
clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
@ -1720,7 +1835,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
clk->notifier_count++;
out:
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}
@ -1745,7 +1860,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
mutex_lock(&prepare_lock);
clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
@ -1766,7 +1881,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
ret = -ENOENT;
}
mutex_unlock(&prepare_lock);
clk_prepare_unlock();
return ret;
}


@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include "clk.h"
DEFINE_SPINLOCK(mxs_lock);


@ -960,47 +960,47 @@ void __init spear1340_clk_init(void)
SPEAR1340_SPDIF_IN_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0100000.spdif-in");
clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
clk = clk_register_gate(NULL, "acp_clk", "ahb_clk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "acp_clk");
clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
clk = clk_register_gate(NULL, "plgpio_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e2800000.gpio");
clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
clk = clk_register_gate(NULL, "video_dec_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_dec");
clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
clk = clk_register_gate(NULL, "video_enc_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_enc");
clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
clk = clk_register_gate(NULL, "video_in_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_vip");
clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
clk = clk_register_gate(NULL, "cam0_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0200000.cam0");
clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
clk = clk_register_gate(NULL, "cam1_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0300000.cam1");
clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
clk = clk_register_gate(NULL, "cam2_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0400000.cam2");
clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
clk = clk_register_gate(NULL, "cam3_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0500000.cam3");


@ -0,0 +1,5 @@
#
# Makefile for sunxi specific clk
#
obj-y += clk-sunxi.o clk-factors.o


@ -0,0 +1,180 @@
/*
* Copyright (C) 2013 Emilio López <emilio@elopez.com.ar>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Adjustable factor-based clock implementation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "clk-factors.h"
/*
* DOC: basic adjustable factor-based clock that cannot gate
*
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
* rate - rate is adjustable.
* clk->rate = (parent->rate * N * (K + 1) >> P) / (M + 1)
* parent - fixed parent. No clk_set_parent support
*/
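/*
 * Worked example (values assumed for illustration, not read from real
 * hardware): with a 24 MHz parent and N = 21, K = 1, M = 0, P = 0, the
 * formula above gives (24000000 * 21 * (1 + 1) >> 0) / (0 + 1) = 1008 MHz.
 */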
struct clk_factors {
struct clk_hw hw;
void __iomem *reg;
struct clk_factors_config *config;
void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
spinlock_t *lock;
};
#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
#define SETMASK(len, pos) (((-1U) >> (31-len)) << (pos))
#define CLRMASK(len, pos) (~(SETMASK(len, pos)))
#define FACTOR_GET(bit, len, reg) (((reg) & SETMASK(len, bit)) >> (bit))
#define FACTOR_SET(bit, len, reg, val) \
(((reg) & CLRMASK(len, bit)) | (val << (bit)))
static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u8 n = 1, k = 0, p = 0, m = 0;
u32 reg;
unsigned long rate;
struct clk_factors *factors = to_clk_factors(hw);
struct clk_factors_config *config = factors->config;
/* Fetch the register value */
reg = readl(factors->reg);
/* Get each individual factor if applicable */
if (config->nwidth != SUNXI_FACTORS_NOT_APPLICABLE)
n = FACTOR_GET(config->nshift, config->nwidth, reg);
if (config->kwidth != SUNXI_FACTORS_NOT_APPLICABLE)
k = FACTOR_GET(config->kshift, config->kwidth, reg);
if (config->mwidth != SUNXI_FACTORS_NOT_APPLICABLE)
m = FACTOR_GET(config->mshift, config->mwidth, reg);
if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
p = FACTOR_GET(config->pshift, config->pwidth, reg);
/* Calculate the rate */
rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
return rate;
}
static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_factors *factors = to_clk_factors(hw);
factors->get_factors((u32 *)&rate, (u32)*parent_rate,
NULL, NULL, NULL, NULL);
return rate;
}
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
u8 n, k, m, p;
u32 reg;
struct clk_factors *factors = to_clk_factors(hw);
struct clk_factors_config *config = factors->config;
unsigned long flags = 0;
factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
if (factors->lock)
spin_lock_irqsave(factors->lock, flags);
/* Fetch the register value */
reg = readl(factors->reg);
/* Set up the new factors - macros do not do anything if width is 0 */
reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
/* Apply them now */
writel(reg, factors->reg);
/* delay 500us so pll stabilizes */
__delay((rate >> 20) * 500 / 2);
if (factors->lock)
spin_unlock_irqrestore(factors->lock, flags);
return 0;
}
static const struct clk_ops clk_factors_ops = {
.recalc_rate = clk_factors_recalc_rate,
.round_rate = clk_factors_round_rate,
.set_rate = clk_factors_set_rate,
};
/**
* clk_register_factors - register a factors clock with
* the clock framework
* @dev: device registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
* @flags: framework-specific flags
* @reg: register address to adjust factors
* @config: shift and width of factors n, k, m and p
* @get_factors: function to calculate the factors for a given frequency
* @lock: shared register lock for this clock
*/
struct clk *clk_register_factors(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags, void __iomem *reg,
struct clk_factors_config *config,
void (*get_factors)(u32 *rate, u32 parent,
u8 *n, u8 *k, u8 *m, u8 *p),
spinlock_t *lock)
{
struct clk_factors *factors;
struct clk *clk;
struct clk_init_data init;
/* allocate the factors */
factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
if (!factors) {
pr_err("%s: could not allocate factors clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
init.name = name;
init.ops = &clk_factors_ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
/* struct clk_factors assignments */
factors->reg = reg;
factors->config = config;
factors->lock = lock;
factors->hw.init = &init;
factors->get_factors = get_factors;
/* register the clock */
clk = clk_register(dev, &factors->hw);
if (IS_ERR(clk))
kfree(factors);
return clk;
}


@ -0,0 +1,27 @@
#ifndef __MACH_SUNXI_CLK_FACTORS_H
#define __MACH_SUNXI_CLK_FACTORS_H
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#define SUNXI_FACTORS_NOT_APPLICABLE (0)
struct clk_factors_config {
u8 nshift;
u8 nwidth;
u8 kshift;
u8 kwidth;
u8 mshift;
u8 mwidth;
u8 pshift;
u8 pwidth;
};
struct clk *clk_register_factors(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags, void __iomem *reg,
struct clk_factors_config *config,
void (*get_factors) (u32 *rate, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p),
spinlock_t *lock);
#endif


@ -0,0 +1,362 @@
/*
* Copyright 2013 Emilio López
*
* Emilio López <emilio@elopez.com.ar>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/sunxi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include "clk-factors.h"
static DEFINE_SPINLOCK(clk_lock);
/**
* sunxi_osc_clk_setup() - Setup function for gatable oscillator
*/
#define SUNXI_OSC24M_GATE 0
static void __init sunxi_osc_clk_setup(struct device_node *node)
{
struct clk *clk;
const char *clk_name = node->name;
const char *parent;
void *reg;
reg = of_iomap(node, 0);
parent = of_clk_get_parent_name(node, 0);
clk = clk_register_gate(NULL, clk_name, parent, CLK_IGNORE_UNUSED,
reg, SUNXI_OSC24M_GATE, 0, &clk_lock);
if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
/**
* sunxi_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
* parent_rate is always 24MHz
*/
static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 div;
/* Normalize value to a 6M multiple */
div = *freq / 6000000;
*freq = 6000000 * div;
/* we were called to round the frequency, we can now return */
if (n == NULL)
return;
/* m is always zero for pll1 */
*m = 0;
/* k is 1 only on these cases */
if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
*k = 1;
else
*k = 0;
/* p will be 3 for divs under 10 */
if (div < 10)
*p = 3;
/* p will be 2 for divs between 10 - 20 and odd divs under 32 */
else if (div < 20 || (div < 32 && (div & 1)))
*p = 2;
/* p will be 1 for even divs under 32, divs under 40 and odd pairs
* of divs between 40-62 */
else if (div < 40 || (div < 64 && (div & 2)))
*p = 1;
/* any other entries have p = 0 */
else
*p = 0;
/* calculate a suitable n based on k and p */
div <<= *p;
div /= (*k + 1);
*n = div / 4;
}
/**
* sunxi_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
static void sunxi_get_apb1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 calcm, calcp;
if (parent_rate < *freq)
*freq = parent_rate;
parent_rate = (parent_rate + (*freq - 1)) / *freq;
/* Invalid rate! */
if (parent_rate > 32)
return;
if (parent_rate <= 4)
calcp = 0;
else if (parent_rate <= 8)
calcp = 1;
else if (parent_rate <= 16)
calcp = 2;
else
calcp = 3;
calcm = (parent_rate >> calcp) - 1;
*freq = (parent_rate >> calcp) / (calcm + 1);
/* we were called to round the frequency, we can now return */
if (n == NULL)
return;
*m = calcm;
*p = calcp;
}
/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
struct factors_data {
struct clk_factors_config *table;
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
};
static struct clk_factors_config pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
.kwidth = 2,
.mshift = 0,
.mwidth = 2,
.pshift = 16,
.pwidth = 2,
};
static struct clk_factors_config apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
.pwidth = 2,
};
static const __initconst struct factors_data pll1_data = {
.table = &pll1_config,
.getter = sunxi_get_pll1_factors,
};
static const __initconst struct factors_data apb1_data = {
.table = &apb1_config,
.getter = sunxi_get_apb1_factors,
};
static void __init sunxi_factors_clk_setup(struct device_node *node,
struct factors_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
const char *parent;
void *reg;
reg = of_iomap(node, 0);
parent = of_clk_get_parent_name(node, 0);
clk = clk_register_factors(NULL, clk_name, parent, CLK_IGNORE_UNUSED,
reg, data->table, data->getter, &clk_lock);
if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
/**
* sunxi_mux_clk_setup() - Setup function for muxes
*/
#define SUNXI_MUX_GATE_WIDTH 2
struct mux_data {
u8 shift;
};
static const __initconst struct mux_data cpu_data = {
.shift = 16,
};
static const __initconst struct mux_data apb1_mux_data = {
.shift = 24,
};
static void __init sunxi_mux_clk_setup(struct device_node *node,
struct mux_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
const char **parents = kmalloc(sizeof(char *) * 5, GFP_KERNEL);
void *reg;
int i = 0;
reg = of_iomap(node, 0);
while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
clk = clk_register_mux(NULL, clk_name, parents, i, 0, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
0, &clk_lock);
if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
/**
* sunxi_divider_clk_setup() - Setup function for simple divider clocks
*/
#define SUNXI_DIVISOR_WIDTH 2
struct div_data {
u8 shift;
u8 pow;
};
static const __initconst struct div_data axi_data = {
.shift = 0,
.pow = 0,
};
static const __initconst struct div_data ahb_data = {
.shift = 4,
.pow = 1,
};
static const __initconst struct div_data apb0_data = {
.shift = 8,
.pow = 1,
};
static void __init sunxi_divider_clk_setup(struct device_node *node,
struct div_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
const char *clk_parent;
void *reg;
reg = of_iomap(node, 0);
clk_parent = of_clk_get_parent_name(node, 0);
clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
reg, data->shift, SUNXI_DIVISOR_WIDTH,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
&clk_lock);
if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
/* Matches for of_clk_init */
static const __initconst struct of_device_id clk_match[] = {
{.compatible = "fixed-clock", .data = of_fixed_clk_setup,},
{.compatible = "allwinner,sun4i-osc-clk", .data = sunxi_osc_clk_setup,},
{}
};
/* Matches for factors clocks */
static const __initconst struct of_device_id clk_factors_match[] = {
{.compatible = "allwinner,sun4i-pll1-clk", .data = &pll1_data,},
{.compatible = "allwinner,sun4i-apb1-clk", .data = &apb1_data,},
{}
};
/* Matches for divider clocks */
static const __initconst struct of_device_id clk_div_match[] = {
{.compatible = "allwinner,sun4i-axi-clk", .data = &axi_data,},
{.compatible = "allwinner,sun4i-ahb-clk", .data = &ahb_data,},
{.compatible = "allwinner,sun4i-apb0-clk", .data = &apb0_data,},
{}
};
/* Matches for mux clocks */
static const __initconst struct of_device_id clk_mux_match[] = {
{.compatible = "allwinner,sun4i-cpu-clk", .data = &cpu_data,},
{.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &apb1_mux_data,},
{}
};
static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
void *function)
{
struct device_node *np;
const struct div_data *data;
const struct of_device_id *match;
void (*setup_function)(struct device_node *, const void *) = function;
for_each_matching_node(np, clk_match) {
match = of_match_node(clk_match, np);
data = match->data;
setup_function(np, data);
}
}
void __init sunxi_init_clocks(void)
{
/* Register all the simple sunxi clocks on DT */
of_clk_init(clk_match);
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
/* Register divider clocks */
of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
/* Register mux clocks */
of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
}


@ -355,15 +355,16 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_clk_periph *periph, void __iomem *clk_base,
u32 offset);
#define TEGRA_CLK_PERIPH(_mux_shift, _mux_width, _mux_flags, \
#define TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, _mux_flags, \
_div_shift, _div_width, _div_frac_width, \
_div_flags, _clk_num, _enb_refcnt, _regs, \
_gate_flags) \
_gate_flags, _table) \
{ \
.mux = { \
.flags = _mux_flags, \
.shift = _mux_shift, \
.width = _mux_width, \
.mask = _mux_mask, \
.table = _table, \
}, \
.divider = { \
.flags = _div_flags, \
@ -393,26 +394,36 @@ struct tegra_periph_init_data {
const char *dev_id;
};
#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset, \
_mux_shift, _mux_width, _mux_flags, _div_shift, \
#define TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_mask, _mux_flags, _div_shift, \
_div_width, _div_frac_width, _div_flags, _regs, \
_clk_num, _enb_refcnt, _gate_flags, _clk_id) \
_clk_num, _enb_refcnt, _gate_flags, _clk_id, _table) \
{ \
.name = _name, \
.clk_id = _clk_id, \
.parent_names = _parent_names, \
.num_parents = ARRAY_SIZE(_parent_names), \
.periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_width, \
.periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, \
_mux_flags, _div_shift, \
_div_width, _div_frac_width, \
_div_flags, _clk_num, \
_enb_refcnt, _regs, \
_gate_flags), \
_gate_flags, _table), \
.offset = _offset, \
.con_id = _con_id, \
.dev_id = _dev_id, \
}
#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_width, _mux_flags, _div_shift, \
_div_width, _div_frac_width, _div_flags, _regs, \
_clk_num, _enb_refcnt, _gate_flags, _clk_id) \
TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, BIT(_mux_width) - 1, _mux_flags, \
_div_shift, _div_width, _div_frac_width, _div_flags, \
_regs, _clk_num, _enb_refcnt, _gate_flags, _clk_id,\
NULL)
/**
* struct clk_super_mux - super clock
*


@ -20,15 +20,23 @@
struct clk_prcmu {
struct clk_hw hw;
u8 cg_sel;
int is_prepared;
int is_enabled;
int opp_requested;
};
/* PRCMU clock operations. */
static int clk_prcmu_prepare(struct clk_hw *hw)
{
int ret;
struct clk_prcmu *clk = to_clk_prcmu(hw);
return prcmu_request_clock(clk->cg_sel, true);
ret = prcmu_request_clock(clk->cg_sel, true);
if (!ret)
clk->is_prepared = 1;
return ret;
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
@ -36,7 +44,15 @@ static void clk_prcmu_unprepare(struct clk_hw *hw)
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
hw->init->name);
__clk_get_name(hw->clk));
else
clk->is_prepared = 0;
}
static int clk_prcmu_is_prepared(struct clk_hw *hw)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
return clk->is_prepared;
}
static int clk_prcmu_enable(struct clk_hw *hw)
@ -79,58 +95,52 @@ static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
return prcmu_set_clock_rate(clk->cg_sel, rate);
}
static int request_ape_opp100(bool enable)
{
static int reqs;
int err = 0;
if (enable) {
if (!reqs)
err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
"clock", 100);
if (!err)
reqs++;
} else {
reqs--;
if (!reqs)
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
"clock");
}
return err;
}
static int clk_prcmu_opp_prepare(struct clk_hw *hw)
{
int err;
struct clk_prcmu *clk = to_clk_prcmu(hw);
err = request_ape_opp100(true);
if (err) {
pr_err("clk_prcmu: %s failed to request APE OPP100 for %s.\n",
__func__, hw->init->name);
return err;
if (!clk->opp_requested) {
err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
(char *)__clk_get_name(hw->clk),
100);
if (err) {
pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
__func__, __clk_get_name(hw->clk));
return err;
}
clk->opp_requested = 1;
}
err = prcmu_request_clock(clk->cg_sel, true);
if (err)
request_ape_opp100(false);
if (err) {
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
(char *)__clk_get_name(hw->clk));
clk->opp_requested = 0;
return err;
}
return err;
clk->is_prepared = 1;
return 0;
}
static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
goto out_error;
if (request_ape_opp100(false))
goto out_error;
return;
if (prcmu_request_clock(clk->cg_sel, false)) {
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
__clk_get_name(hw->clk));
return;
}
out_error:
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
hw->init->name);
if (clk->opp_requested) {
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
(char *)__clk_get_name(hw->clk));
clk->opp_requested = 0;
}
clk->is_prepared = 0;
}
static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
@ -138,38 +148,49 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
int err;
struct clk_prcmu *clk = to_clk_prcmu(hw);
err = prcmu_request_ape_opp_100_voltage(true);
if (err) {
pr_err("clk_prcmu: %s failed to request APE OPP VOLT for %s.\n",
__func__, hw->init->name);
return err;
if (!clk->opp_requested) {
err = prcmu_request_ape_opp_100_voltage(true);
if (err) {
pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
__func__, __clk_get_name(hw->clk));
return err;
}
clk->opp_requested = 1;
}
err = prcmu_request_clock(clk->cg_sel, true);
if (err)
if (err) {
prcmu_request_ape_opp_100_voltage(false);
clk->opp_requested = 0;
return err;
}
return err;
clk->is_prepared = 1;
return 0;
}
static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
goto out_error;
if (prcmu_request_ape_opp_100_voltage(false))
goto out_error;
return;
if (prcmu_request_clock(clk->cg_sel, false)) {
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
__clk_get_name(hw->clk));
return;
}
out_error:
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
hw->init->name);
if (clk->opp_requested) {
prcmu_request_ape_opp_100_voltage(false);
clk->opp_requested = 0;
}
clk->is_prepared = 0;
}
static struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@ -181,6 +202,7 @@ static struct clk_ops clk_prcmu_scalable_ops = {
static struct clk_ops clk_prcmu_gate_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@ -202,6 +224,7 @@ static struct clk_ops clk_prcmu_rate_ops = {
static struct clk_ops clk_prcmu_opp_gate_ops = {
.prepare = clk_prcmu_opp_prepare,
.unprepare = clk_prcmu_opp_unprepare,
.is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@ -211,6 +234,7 @@ static struct clk_ops clk_prcmu_opp_gate_ops = {
static struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@ -242,7 +266,9 @@ static struct clk *clk_reg_prcmu(const char *name,
}
clk->cg_sel = cg_sel;
clk->is_prepared = 1;
clk->is_enabled = 1;
clk->opp_requested = 0;
/* "rate" can be used for changing the initial frequency */
if (rate)
prcmu_set_clock_rate(cg_sel, rate);


@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sunxi_timer.h>
#include <linux/clk-provider.h>
#include <linux/clk/sunxi.h>
#define TIMER_CTL_REG 0x00
#define TIMER_CTL_ENABLE (1 << 0)
@ -123,7 +123,7 @@ void __init sunxi_timer_init(void)
if (irq <= 0)
panic("Can't parse IRQ");
of_clk_init(NULL);
sunxi_init_clocks();
clk = of_clk_get(node, 0);
if (IS_ERR(clk))


@ -152,7 +152,7 @@ struct clk {
}, \
.reg = _reg, \
.shift = _shift, \
.width = _width, \
.mask = BIT(_width) - 1, \
.flags = _mux_flags, \
.lock = _lock, \
}; \


@ -45,6 +45,14 @@ struct clk_hw;
* undo any work done in the @prepare callback. Called with
* prepare_lock held.
*
* @is_prepared: Queries the hardware to determine if the clock is prepared.
* This function is allowed to sleep. Optional, if this op is not
* set then the prepare count will be used.
*
* @unprepare_unused: Unprepare the clock atomically. Only called from
* clk_disable_unused for prepare clocks with special needs.
* Called with prepare mutex held. This function may sleep.
*
* @enable: Enable the clock atomically. This must not return until the
* clock is generating a valid clock signal, usable by consumer
* devices. Called with enable_lock held. This function must not
@ -108,6 +116,8 @@ struct clk_hw;
struct clk_ops {
int (*prepare)(struct clk_hw *hw);
void (*unprepare)(struct clk_hw *hw);
int (*is_prepared)(struct clk_hw *hw);
void (*unprepare_unused)(struct clk_hw *hw);
int (*enable)(struct clk_hw *hw);
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
@ -287,8 +297,9 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
u32 *table;
u32 mask;
u8 shift;
u8 width;
u8 flags;
spinlock_t *lock;
};
@ -297,11 +308,17 @@ struct clk_mux {
#define CLK_MUX_INDEX_BIT BIT(1)
extern const struct clk_ops clk_mux_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
/**
* struct clk_fixed_factor - fixed multiplier and divider clock
*
@ -325,6 +342,37 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
/***
* struct clk_composite - aggregate clock of mux, divider and gate clocks
*
* @hw: handle between common and hardware-specific interfaces
* @mux_hw: handle between composite and hardware-specific mux clock
* @div_hw: handle between composite and hardware-specific divider clock
* @gate_hw: handle between composite and hardware-specific gate clock
* @mux_ops: clock ops for mux
* @div_ops: clock ops for divider
* @gate_ops: clock ops for gate
*/
struct clk_composite {
struct clk_hw hw;
struct clk_ops ops;
struct clk_hw *mux_hw;
struct clk_hw *div_hw;
struct clk_hw *gate_hw;
const struct clk_ops *mux_ops;
const struct clk_ops *div_ops;
const struct clk_ops *gate_ops;
};
struct clk *clk_register_composite(struct device *dev, const char *name,
const char **parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *div_hw, const struct clk_ops *div_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@ -351,6 +399,7 @@ unsigned int __clk_get_enable_count(struct clk *clk);
unsigned int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
bool __clk_is_prepared(struct clk *clk);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);

include/linux/clk/sunxi.h (new file)

@ -0,0 +1,22 @@
/*
* Copyright 2012 Maxime Ripard
*
* Maxime Ripard <maxime.ripard@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_SUNXI_H_
#define __LINUX_CLK_SUNXI_H_
void __init sunxi_init_clocks(void);
#endif