/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/clk.h>
#include <subdev/timer.h>
#include <subdev/volt.h>

#define BUSY_SLOT 0
#define CLK_SLOT 7

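/*
 * DVFS policy parameters: load figures are percentages of GPU utilization,
 * p_smooth controls the weight of past samples in the running average, and
 * avg_load holds the smoothed utilization computed by the DVFS worker.
 */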
struct gk20a_pmu_dvfs_data {
	int p_load_target;
	int p_load_max;
	int p_smooth;
	unsigned int avg_load;
};

struct gk20a_pmu_priv {
	struct nvkm_pmu base;
	struct nvkm_alarm alarm;
	struct gk20a_pmu_dvfs_data *data;
};

struct gk20a_pmu_dvfs_dev_status {
	unsigned long total;
	unsigned long busy;
	int cur_state;
};

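/* Ask the clock subdev to program the requested performance state. */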
static int
gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
{
	struct nvkm_clk *clk = nvkm_clk(priv);

	return nvkm_clk_astate(clk, *state, 0, false);
}

static int
gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
{
	struct nvkm_clk *clk = nvkm_clk(priv);

	*state = clk->pstate;
	return 0;
}

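/*
 * Pick the next performance level from the smoothed load. Returns 1 when
 * the chosen level differs from the current one, 0 when no change is needed.
 */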
static int
gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
				int *state, int load)
{
	struct gk20a_pmu_dvfs_data *data = priv->data;
	struct nvkm_clk *clk = nvkm_clk(priv);
	int cur_level, level;

	/* For GK20A, the performance level is directly mapped to pstate */
	level = cur_level = clk->pstate;

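	/*
	 * Above p_load_max, jump up by a third of the available states.
	 * Otherwise nudge the level in proportion to the deviation from
	 * p_load_target: e.g. load = 49% with a 70% target gives
	 * ((49 - 70) * 10 / 70) / 2 = -1, i.e. one level down.
	 */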
	if (load > data->p_load_max) {
		level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
	} else {
		level += ((load - data->p_load_target) * 10 /
				data->p_load_target) / 2;
		level = max(0, level);
		level = min(clk->state_nr - 1, level);
	}

	nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);

	*state = level;

	if (level == cur_level)
		return 0;
	else
		return 1;
}

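/* Sample the busy and total cycle counters used to derive utilization. */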
static int
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
			      struct gk20a_pmu_dvfs_dev_status *status)
{
	status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
	status->total = nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
	return 0;
}

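/* Reset both counters so the next sample covers a fresh interval. */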
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
{
	nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
	nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
}

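/*
 * Periodic DVFS worker: sample the counters, update the smoothed load,
 * reclock if a new performance state is warranted, then rearm the alarm
 * (100 ms period).
 */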
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
	struct gk20a_pmu_priv *priv =
		container_of(alarm, struct gk20a_pmu_priv, alarm);
	struct gk20a_pmu_dvfs_data *data = priv->data;
	struct gk20a_pmu_dvfs_dev_status status;
	struct nvkm_clk *clk = nvkm_clk(priv);
	struct nvkm_volt *volt = nvkm_volt(priv);
	u32 utilization = 0;
	int state, ret;

	/*
	 * The PMU is initialized before CLK and VOLT, so we have to make sure
	 * the CLK and VOLT are ready here.
	 */
	if (!clk || !volt)
		goto resched;

	ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
	if (ret) {
		nv_warn(priv, "failed to get device status\n");
		goto resched;
	}

	if (status.total)
		utilization = div_u64((u64)status.busy * 100, status.total);

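	/* Weighted running average over p_smooth + 1 samples. */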
	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
	data->avg_load /= data->p_smooth + 1;
	nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
		 utilization, data->avg_load);

	ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
	if (ret) {
		nv_warn(priv, "failed to get current state\n");
		goto resched;
	}

	if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
		nv_trace(priv, "set new state to %d\n", state);
		gk20a_pmu_dvfs_target(priv, &state);
	}

resched:
	gk20a_pmu_dvfs_reset_dev_status(priv);
	nvkm_timer_alarm(priv, 100000000, alarm);
}

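/* Cancel the pending DVFS alarm before the subdev is suspended or torn down. */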
static int
gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_pmu *pmu = (void *)object;
	struct gk20a_pmu_priv *priv = (void *)pmu;

	nvkm_timer_alarm_cancel(priv, &priv->alarm);

	return nvkm_subdev_fini(&pmu->base, suspend);
}

static int
gk20a_pmu_init(struct nvkm_object *object)
{
	struct nvkm_pmu *pmu = (void *)object;
	struct gk20a_pmu_priv *priv = (void *)pmu;
	int ret;

	ret = nvkm_subdev_init(&pmu->base);
	if (ret)
		return ret;

	pmu->pgob = nvkm_pmu_pgob;

	/* init pwr perf counter */
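	/*
	 * BUSY_SLOT is read back as the busy-cycle count and CLK_SLOT as the
	 * total-cycle count in gk20a_pmu_dvfs_get_dev_status(), so the two
	 * slots together give the utilization ratio used by the DVFS worker.
	 */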
	nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
	nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
	nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);

	nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);

	return ret;
}

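/* Default policy: aim for 70% load, upclock aggressively above 90%. */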
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data = {
	.p_load_target = 70,
	.p_load_max = 90,
	.p_smooth = 1,
};

static int
gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct gk20a_pmu_priv *priv;
	int ret;

	ret = nvkm_pmu_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->data = &gk20a_dvfs_data;

	nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
	return 0;
}

struct nvkm_oclass *
gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
	.base.handle = NV_SUBDEV(PMU, 0xea),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk20a_pmu_ctor,
		.dtor = _nvkm_pmu_dtor,
		.init = gk20a_pmu_init,
		.fini = gk20a_pmu_fini,
	},
}.base;