mirror of
https://github.com/torvalds/linux.git
synced 2024-12-28 22:02:28 +00:00
OPP: Remove genpd_virt_dev_lock
All the config operations for OPP tables share common code paths now, and none of the other resource types have such protection in place. Either all should have it or none. The understanding here is that users won't clear the OPP configs while they are still in use, and so such a case won't happen. We can always come back and use a wider lock for all resource types if required. Also fix the error path on memory allocation failure, which previously returned 0 instead of -ENOMEM. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
This commit is contained in:
parent
c2bebf9804
commit
48b5aaec59
@@ -1076,7 +1076,7 @@ static int _opp_set_required_opps_genpd(struct device *dev,
|
||||
{
|
||||
struct device **genpd_virt_devs =
|
||||
opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
|
||||
int index, target, delta, ret = 0;
|
||||
int index, target, delta, ret;
|
||||
|
||||
/* Scaling up? Set required OPPs in normal order, else reverse */
|
||||
if (!scaling_down) {
|
||||
@@ -1089,23 +1089,15 @@ static int _opp_set_required_opps_genpd(struct device *dev,
|
||||
delta = -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
|
||||
* after it is freed from another thread.
|
||||
*/
|
||||
mutex_lock(&opp_table->genpd_virt_dev_lock);
|
||||
|
||||
while (index != target) {
|
||||
ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index);
|
||||
if (ret)
|
||||
break;
|
||||
return ret;
|
||||
|
||||
index += delta;
|
||||
}
|
||||
|
||||
mutex_unlock(&opp_table->genpd_virt_dev_lock);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This is only called for PM domain for now */
|
||||
@@ -1474,7 +1466,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
mutex_init(&opp_table->lock);
|
||||
mutex_init(&opp_table->genpd_virt_dev_lock);
|
||||
INIT_LIST_HEAD(&opp_table->dev_list);
|
||||
INIT_LIST_HEAD(&opp_table->lazy);
|
||||
|
||||
@@ -1510,7 +1501,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
|
||||
remove_opp_dev:
|
||||
_of_clear_opp_table(opp_table);
|
||||
_remove_opp_dev(opp_dev, opp_table);
|
||||
mutex_destroy(&opp_table->genpd_virt_dev_lock);
|
||||
mutex_destroy(&opp_table->lock);
|
||||
err:
|
||||
kfree(opp_table);
|
||||
@@ -1678,7 +1668,6 @@ static void _opp_table_kref_release(struct kref *kref)
|
||||
list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
|
||||
_remove_opp_dev(opp_dev, opp_table);
|
||||
|
||||
mutex_destroy(&opp_table->genpd_virt_dev_lock);
|
||||
mutex_destroy(&opp_table->lock);
|
||||
kfree(opp_table);
|
||||
}
|
||||
@@ -2395,7 +2384,7 @@ static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
|
||||
opp_table->config_regulators = NULL;
|
||||
}
|
||||
|
||||
static void _detach_genpd(struct opp_table *opp_table)
|
||||
static void _opp_detach_genpd(struct opp_table *opp_table)
|
||||
{
|
||||
int index;
|
||||
|
||||
@@ -2449,13 +2438,11 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
|
||||
if (!opp_table->required_opp_count)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
mutex_lock(&opp_table->genpd_virt_dev_lock);
|
||||
|
||||
opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
|
||||
sizeof(*opp_table->genpd_virt_devs),
|
||||
GFP_KERNEL);
|
||||
if (!opp_table->genpd_virt_devs)
|
||||
goto unlock;
|
||||
return -ENOMEM;
|
||||
|
||||
while (*name) {
|
||||
if (index >= opp_table->required_opp_count) {
|
||||
@@ -2478,29 +2465,15 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
|
||||
|
||||
if (virt_devs)
|
||||
*virt_devs = opp_table->genpd_virt_devs;
|
||||
mutex_unlock(&opp_table->genpd_virt_dev_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
_detach_genpd(opp_table);
|
||||
unlock:
|
||||
mutex_unlock(&opp_table->genpd_virt_dev_lock);
|
||||
_opp_detach_genpd(opp_table);
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
static void _opp_detach_genpd(struct opp_table *opp_table)
|
||||
{
|
||||
/*
|
||||
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
|
||||
* used in parallel.
|
||||
*/
|
||||
mutex_lock(&opp_table->genpd_virt_dev_lock);
|
||||
_detach_genpd(opp_table);
|
||||
mutex_unlock(&opp_table->genpd_virt_dev_lock);
|
||||
}
|
||||
|
||||
static void _opp_clear_config(struct opp_config_data *data)
|
||||
{
|
||||
if (data->flags & OPP_CONFIG_GENPD)
|
||||
|
@@ -160,7 +160,6 @@ enum opp_table_access {
|
||||
* @rate_clk_single: Currently configured frequency for single clk.
|
||||
* @current_opp: Currently configured OPP for the table.
|
||||
* @suspend_opp: Pointer to OPP to be used during device suspend.
|
||||
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
|
||||
* @genpd_virt_devs: List of virtual devices for multiple genpd support.
|
||||
* @required_opp_tables: List of device OPP tables that are required by OPPs in
|
||||
* this table.
|
||||
@@ -212,7 +211,6 @@ struct opp_table {
|
||||
struct dev_pm_opp *current_opp;
|
||||
struct dev_pm_opp *suspend_opp;
|
||||
|
||||
struct mutex genpd_virt_dev_lock;
|
||||
struct device **genpd_virt_devs;
|
||||
struct opp_table **required_opp_tables;
|
||||
unsigned int required_opp_count;
|
||||
|
Loading…
Reference in New Issue
Block a user