clk: move some parent related functions upwards

Move some parent-related functions up in clk.c so they can be used by
the modifications in the following patch, which enables clock
reparenting during set_rate. No other changes are made, so this patch
makes no functional difference in isolation. It is kept separate from
the following patch primarily to make that patch easier to read.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Cc: Mike Turquette <mturquette@linaro.org>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Mike Turquette <mturquette@linaro.org>
Author: James Hogan, 2013-07-29 12:24:59 +01:00; committed by Mike Turquette
Commit: 4935b22c46 (parent: 7ef3dcc814)
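
For context only (not part of this patch): below is a condensed, approximate sketch of how the existing clk_set_parent() in this era of clk.c consumes the helpers being moved. clk_fetch_parent_index() maps a parent clk to the index expected by the .set_parent op, and __clk_set_parent() performs the actual switch. Flag checks and some error handling are abbreviated and may not match the tree exactly.

/* Abbreviated sketch of the existing caller; not part of this patch. */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	u8 p_index = 0;
	unsigned long p_rate = 0;

	/* topology changes happen under the global prepare lock */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* map the new parent to an index understood by clk->ops->set_parent() */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index == clk->num_parents) {
			ret = -EINVAL;
			goto out;
		}
	}

	/* let PRE_RATE_CHANGE notifiers veto the resulting rate */
	ret = __clk_speculate_rates(clk, p_rate);
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* migrate prepare/enable state and switch the parent in hardware */
	ret = __clk_set_parent(clk, parent, p_index);

	/* recalculate rates down the subtree and fire notifiers */
	if (ret)
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	else
		__clk_recalc_rates(clk, POST_RATE_CHANGE);

out:
	clk_prepare_unlock();
	return ret;
}

The following patch reuses these helpers from the rate-change path, which sits earlier in the file than clk_set_parent(); moving them up avoids forward declarations.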

drivers/clk/clk.c

@@ -1028,6 +1028,110 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	u8 i;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
								GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	return i;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	clk->parent = new_parent;
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
@@ -1335,18 +1439,6 @@ out:
	return ret;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	clk->parent = new_parent;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
@@ -1354,98 +1446,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	u8 i;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
								GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	return i;
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);

	return 0;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching