/*
 * OMAP3/4 - specific DPLL control functions
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
 * Menon
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>

#include "clockdomain.h"
#include "clock.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE			0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1

#define MAX_DPLL_WAIT_TRIES		1000000

/* Private functions */

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
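/* (clken_bits is one of the DPLL_LOCKED, DPLL_LOW_POWER_BYPASS or
 * DPLL_LOW_POWER_STOP mode values passed in by the callers below) */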
static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = omap2_clk_readl(clk, dd->control_reg);
	v &= ~dd->enable_mask;
	v |= clken_bits << __ffs(dd->enable_mask);
	omap2_clk_writel(v, clk, dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = __clk_get_name(clk->hw.clk);

	state <<= __ffs(dd->idlest_mask);

	while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		printk(KERN_ERR "clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}

/* From 3430 TRM ES2 4.7.6.2 */
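/* (maps the DPLL internal reference clock fint = clk_ref / n to a FREQSEL
 * code; returns 0, which the caller warns about, when fint falls outside
 * the bands listed below) */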
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
	unsigned long fint;
	u16 f = 0;

	fint = __clk_get_rate(clk->dpll_data->clk_ref) / n;

	pr_debug("clock: fint is %lu\n", fint);

	if (fint >= 750000 && fint <= 1000000)
		f = 0x3;
	else if (fint > 1000000 && fint <= 1250000)
		f = 0x4;
	else if (fint > 1250000 && fint <= 1500000)
		f = 0x5;
	else if (fint > 1500000 && fint <= 1750000)
		f = 0x6;
	else if (fint > 1750000 && fint <= 2100000)
		f = 0x7;
	else if (fint > 7500000 && fint <= 10000000)
		f = 0xB;
	else if (fint > 10000000 && fint <= 12500000)
		f = 0xC;
	else if (fint > 12500000 && fint <= 15000000)
		f = 0xD;
	else if (fint > 15000000 && fint <= 17500000)
		f = 0xE;
	else if (fint > 17500000 && fint <= 21000000)
		f = 0xF;
	else
		pr_debug("clock: unknown freqsel setting for %d\n", n);

	return f;
}

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
 * readiness before returning. Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code. If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode. In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate. Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code. If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 __clk_get_name(clk->hw.clk));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop. Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code. If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}

/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
	unsigned long fint, clkinp; /* watch out for overflow */

	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
	fint = (clkinp / n) * m;

	if (fint < 1000000000)
		*dco = 2;
	else
		*dco = 4;
}

/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd; /* watch out for overflow */
	int mod1, mod2;

	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 */
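	/*
	 * Worked example (illustrative): with clkinp = 38.4 MHz, m = 25,
	 * n = 1, the /100000 shift below gives clkinp = 384, so
	 * sd = (384 * 25) / (250 * 1) = 38, then 38 / 10 = 3 with non-zero
	 * remainders at both steps, which rounds up to *sd_div = 4
	 * (i.e. ceil(25 * 38.4 / 250) = ceil(3.84) = 4).
	 */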
	clkinp /= 100000;		/* shift from MHz to 10*Hz for 38.4 and 19.2 */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}

/*
 * _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk * of DPLL to set
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div;
	u32 v;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction. Jitter correction applicable for OMAP343X
	 * only since freqsel field is no longer present on other devices.
	 */
	if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = omap2_clk_readl(clk, dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = omap2_clk_readl(clk, dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	omap2_clk_writel(v, clk, dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = omap2_clk_readl(clk, dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	return 0;
}

/* Public functions */

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @clk: DPLL struct clk
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	return omap2_get_dpll_rate(clk);
}

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock. This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state. Intended to be used as the struct clk's
 * enable function. If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, r);
			return r;
		}
	}

	parent = __clk_get_parent(hw->clk);

	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop. This function is
 * intended for use in struct clkops. No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	_omap3_noncore_dpll_stop(clk);
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}

/* Non-CORE DPLL rate set code */

/**
 * omap3_noncore_dpll_set_rate - set non-core DPLL rate
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Set the DPLL CLKOUT to the target rate. If the DPLL can enter
 * low-power bypass, and the target rate is the bypass source clock
 * rate, then configure the DPLL for bypass. Otherwise, round the
 * target rate if it hasn't been done already, then program and lock
 * the DPLL. Returns -EINVAL upon error, or 0 upon success.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct clk *new_parent = NULL;
	unsigned long rrate;
	u16 freqsel = 0;
	struct dpll_data *dd;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (__clk_get_rate(dd->clk_bypass) == rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		pr_debug("%s: %s: set rate: entering bypass.\n",
			 __func__, __clk_get_name(hw->clk));

		__clk_prepare(dd->clk_bypass);
		clk_enable(dd->clk_bypass);
		ret = _omap3_noncore_dpll_bypass(clk);
		if (!ret)
			new_parent = dd->clk_bypass;
		clk_disable(dd->clk_bypass);
		__clk_unprepare(dd->clk_bypass);
	} else {
		__clk_prepare(dd->clk_ref);
		clk_enable(dd->clk_ref);

		/* XXX this check is probably pointless in the CCF context */
		if (dd->last_rounded_rate != rate) {
			rrate = __clk_round_rate(hw->clk, rate);
			if (rrate != rate) {
				pr_warn("%s: %s: final rate %lu does not match desired rate %lu\n",
					__func__, __clk_get_name(hw->clk),
					rrate, rate);
				rate = rrate;
			}
		}

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/* Freqsel is available only on OMAP343X devices */
		if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
			freqsel = _omap3_dpll_compute_freqsel(clk,
						dd->last_rounded_n);
			WARN_ON(!freqsel);
		}

		pr_debug("%s: %s: set rate: locking rate to %lu.\n",
			 __func__, __clk_get_name(hw->clk), rate);

		ret = omap3_noncore_dpll_program(clk, freqsel);
		if (!ret)
			new_parent = dd->clk_ref;
		clk_disable(dd->clk_ref);
		__clk_unprepare(dd->clk_ref);
	}
	/*
	 * FIXME - this is all wrong. common code handles reparenting and
	 * migrating prepare/enable counts. dplls should be a multiplexer
	 * clock and this should be a set_parent operation so that all of that
	 * stuff is inherited for free
	 */

	if (!ret && clk_get_parent(hw->clk) != new_parent)
		__clk_reparent(hw->clk, new_parent);

	return 0;
}

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!dd->autoidle_reg)
		return -EINVAL;

	v = omap2_clk_readl(clk, dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control. This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430. The DPLL will enter low-power stop when its downstream
 * clocks are gated. No return value.
 */
void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_reg)
		return;

	/*
	 * REVISIT: CORE DPLL can optionally enter low-power bypass
	 * by writing 0x5 instead of 0x1. Add some mechanism to
	 * optionally enter this mode.
	 */
	v = omap2_clk_readl(clk, dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	omap2_clk_writel(v, clk, dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control. No return value.
 */
void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_reg)
		return;

	v = omap2_clk_readl(clk, dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
	omap2_clk_writel(v, clk, dd->autoidle_reg);
}

/* Clock control for DPLL outputs */

/* Find the parent DPLL for the given clkoutx2 clock */
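/* (walks up the parent chain, skipping CLK_IS_BASIC clocks, until it finds
 * a clk_hw_omap that carries dpll_data) */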
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;
	struct clk *parent;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			parent = __clk_get_parent(hw->clk);
			hw = __clk_get_hw(parent);
		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent? error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}

/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @clk: DPLL output struct clk
 *
 * Using parent clock DPLL data, look up DPLL state. If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}

int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return 0;
}

long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	const struct dpll_data *dd;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!*prate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	/* TYPE J does not have a clkoutx2 */
	if (dd->flags & DPLL_J_TYPE) {
		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
		return *prate;
	}

	WARN_ON(!dd->enable_mask);

	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* If in bypass, the rate is fixed to the bypass rate */
	if (v != OMAP3XXX_EN_DPLL_LOCKED)
		return *prate;

	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
		unsigned long best_parent;

		best_parent = (rate / 2);
		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
					  best_parent);
	}

	return *prate * 2;
}

/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle	= omap3_dpll_allow_idle,
	.deny_idle	= omap3_dpll_deny_idle,
};