Merge branch 'pm-cpuidle'
* pm-cpuidle:
  cpuidle: delay enabling interrupts until all coupled CPUs leave idle
  cpuidle: poll state can measure residency
  cpuidle: Move perf multiplier calculation out of the selection loop
  cpuidle: Do not substract exit latency from assumed sleep length
  cpuidle: Ensure menu coefficients stay within domain
  cpuidle: Use actual state latency in menu governor
  cpuidle: rename expected_us to next_timer_us in menu governor
commit f71a822fc0
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -85,7 +85,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
 	time_end = ktime_get();
 
-	local_irq_enable();
+	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+		local_irq_enable();
 
 	diff = ktime_to_us(ktime_sub(time_end, time_start));
 	if (diff > INT_MAX)
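For context, a minimal standalone sketch of the ordering this hunk enforces; the stub functions below are hypothetical stand-ins, not kernel APIs. For an uncoupled state, interrupts can be re-enabled right after the exit timestamp is taken; for a coupled state they must stay off until every CPU in the coupled set has left idle, so cpuidle_enter_state() now skips the enable and leaves it to the coupled-state exit path.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for local_irq_enable() and the coupled sync */
static void enable_irqs(void)       { puts("interrupts enabled"); }
static void sync_coupled_cpus(void) { puts("all coupled CPUs left idle"); }

static void idle_exit(bool coupled)
{
	/* exit timestamp is taken before this point, interrupts still off */
	if (!coupled) {
		enable_irqs();          /* independent state: enable right away */
		return;
	}
	sync_coupled_cpus();            /* coupled state: synchronize first... */
	enable_irqs();                  /* ...only then enable interrupts */
}

int main(void)
{
	idle_exit(false);
	idle_exit(true);
	return 0;
}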
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -209,7 +209,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
 	state->exit_latency = 0;
 	state->target_residency = 0;
 	state->power_usage = -1;
-	state->flags = 0;
+	state->flags = CPUIDLE_FLAG_TIME_VALID;
 	state->enter = poll_idle;
 	state->disabled = false;
 }
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -122,9 +122,8 @@ struct menu_device {
 	int		last_state_idx;
 	int		needs_update;
 
-	unsigned int	expected_us;
+	unsigned int	next_timer_us;
 	unsigned int	predicted_us;
-	unsigned int	exit_us;
 	unsigned int	bucket;
 	unsigned int	correction_factor[BUCKETS];
 	unsigned int	intervals[INTERVALS];
@@ -257,7 +256,7 @@ again:
 	stddev = int_sqrt(stddev);
 	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
 							|| stddev <= 20) {
-		if (data->expected_us > avg)
+		if (data->next_timer_us > avg)
 			data->predicted_us = avg;
 		return;
 	}
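The condition in this hunk is get_typical_interval()'s acceptance test: the average of recent idle intervals is trusted as a prediction only when the spread is small (avg greater than six standard deviations, with at least 3/4 of the samples surviving outlier removal, or a standard deviation of 20us or less). A standalone sketch of that test, with made-up sample values and libm's sqrt() standing in for the kernel's int_sqrt():

#include <math.h>
#include <stdio.h>

#define INTERVALS 8

int main(void)
{
	unsigned int intervals[INTERVALS] = {480, 510, 495, 505, 490, 500, 515, 485};
	unsigned int divisor = INTERVALS;	/* samples kept (no outliers dropped here) */
	unsigned long long sum = 0, sq = 0;
	unsigned int avg, stddev;
	int i;

	for (i = 0; i < INTERVALS; i++)
		sum += intervals[i];
	avg = sum / divisor;

	for (i = 0; i < INTERVALS; i++) {
		long long diff = (long long)intervals[i] - avg;
		sq += diff * diff;
	}
	stddev = (unsigned int)sqrt((double)sq / divisor);

	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) || stddev <= 20)
		printf("pattern detected: predict %u us (stddev %u)\n", avg, stddev);
	else
		printf("no stable pattern (avg %u, stddev %u)\n", avg, stddev);
	return 0;
}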
@@ -289,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 	int i;
-	int multiplier;
+	unsigned int interactivity_req;
 	struct timespec t;
 
 	if (data->needs_update) {
@@ -298,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	}
 
 	data->last_state_idx = 0;
-	data->exit_us = 0;
 
 	/* Special case when user has set very strict latency requirement */
 	if (unlikely(latency_req == 0))
@@ -306,13 +304,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 	/* determine the expected residency time, round up */
 	t = ktime_to_timespec(tick_nohz_get_sleep_length());
-	data->expected_us =
+	data->next_timer_us =
 		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
 
 
-	data->bucket = which_bucket(data->expected_us);
-
-	multiplier = performance_multiplier();
+	data->bucket = which_bucket(data->next_timer_us);
 
 	/*
 	 * if the correction factor is 0 (eg first time init or cpu hotplug
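The renamed assignment above flattens the nohz sleep length from a struct timespec into microseconds. The same conversion in isolation, with fixed example values:

#include <stdio.h>

#define USEC_PER_SEC  1000000L
#define NSEC_PER_USEC 1000L

int main(void)
{
	/* stand-in for the timespec returned by tick_nohz_get_sleep_length() */
	struct { long tv_sec; long tv_nsec; } t = { 0, 1500000 };	/* 1.5 ms */

	unsigned int next_timer_us =
		(unsigned int)(t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC);

	printf("next timer expires in %u us\n", next_timer_us);	/* prints 1500 */
	return 0;
}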
@@ -326,17 +322,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * operands are 32 bits.
 	 * Make sure to round up for half microseconds.
 	 */
-	data->predicted_us = div_round64((uint64_t)data->expected_us *
+	data->predicted_us = div_round64((uint64_t)data->next_timer_us *
 					 data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
 	get_typical_interval(data);
 
+	/*
+	 * Performance multiplier defines a minimum predicted idle
+	 * duration / latency ratio. Adjust the latency limit if
+	 * necessary.
+	 */
+	interactivity_req = data->predicted_us / performance_multiplier();
+	if (latency_req > interactivity_req)
+		latency_req = interactivity_req;
+
 	/*
 	 * We want to default to C1 (hlt), not to busy polling
 	 * unless the timer is happening really really soon.
 	 */
-	if (data->expected_us > 5 &&
+	if (data->next_timer_us > 5 &&
 	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
 		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
 		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
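Two computations meet in this hunk: the prediction scales the timer distance by a per-bucket correction factor (fixed point, with menu.c's RESOLUTION of 1024 and DECAY of 8, so a factor of RESOLUTION * DECAY means "sleeps as long as predicted"), and the new interactivity_req turns that prediction into a cap on acceptable exit latency, replacing the old per-state multiplier check inside the selection loop. A standalone sketch with illustrative numbers; the multiplier value is a stand-in for performance_multiplier():

#include <stdint.h>
#include <stdio.h>

#define RESOLUTION 1024
#define DECAY 8

int main(void)
{
	unsigned int next_timer_us = 1000;	/* next timer fires in 1 ms */
	unsigned int correction = 6144;		/* past sleeps ran ~75% of expected */
	unsigned int multiplier = 10;		/* stand-in for performance_multiplier() */
	int latency_req = 200;			/* PM QoS latency limit, us */

	/* predicted_us = next_timer_us * correction / (RESOLUTION * DECAY),
	   rounded up for half microseconds like div_round64() */
	unsigned int predicted_us =
		(unsigned int)(((uint64_t)next_timer_us * correction +
				(RESOLUTION * DECAY) / 2) / (RESOLUTION * DECAY));

	/* exit latency must stay below predicted idle duration / multiplier */
	unsigned int interactivity_req = predicted_us / multiplier;
	if (latency_req > (int)interactivity_req)
		latency_req = interactivity_req;

	printf("predicted %u us, latency limit %d us\n", predicted_us, latency_req);
	return 0;
}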
@@ -355,11 +360,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 			continue;
 		if (s->exit_latency > latency_req)
 			continue;
-		if (s->exit_latency * multiplier > data->predicted_us)
-			continue;
 
 		data->last_state_idx = i;
-		data->exit_us = s->exit_latency;
 	}
 
 	return data->last_state_idx;
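With the multiplier folded into latency_req before the loop, the selection loop reduces to two comparisons per state. A toy model, with hypothetical flat arrays in place of the kernel's state structs:

#include <stdio.h>

int main(void)
{
	/* per-state {target_residency, exit_latency}: C1/C3/C6-like stand-ins */
	unsigned int residency[] = {1, 100, 400};
	unsigned int exit_lat[]  = {1, 20, 100};
	unsigned int predicted_us = 750, latency_req = 75;
	int i, last_state_idx = 0;

	for (i = 0; i < 3; i++) {
		if (residency[i] > predicted_us)
			continue;	/* would not stay idle long enough */
		if (exit_lat[i] > latency_req)
			continue;	/* too slow to wake up */
		last_state_idx = i;	/* deepest state that still fits */
	}
	printf("selected state %d\n", last_state_idx);	/* prints 1 */
	return 0;
}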
@@ -390,36 +392,47 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
-	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
 	unsigned int new_factor;
 
 	/*
-	 * Ugh, this idle state doesn't support residency measurements, so we
-	 * are basically lost in the dark.  As a compromise, assume we slept
-	 * for the whole expected time.
+	 * Try to figure out how much time passed between entry to low
+	 * power state and occurrence of the wakeup event.
+	 *
+	 * If the entered idle state didn't support residency measurements,
+	 * we are basically lost in the dark how much time passed.
+	 * As a compromise, assume we slept for the whole expected time.
+	 *
+	 * Any measured amount of time will include the exit latency.
+	 * Since we are interested in when the wakeup begun, not when it
+	 * was completed, we must substract the exit latency. However, if
+	 * the measured amount of time is less than the exit latency,
+	 * assume the state was never reached and the exit latency is 0.
 	 */
-	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
-		last_idle_us = data->expected_us;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
+		/* Use timer value as is */
+		measured_us = data->next_timer_us;
 
+	} else {
+		/* Use measured value */
+		measured_us = cpuidle_get_last_residency(dev);
 
-	measured_us = last_idle_us;
+		/* Deduct exit latency */
+		if (measured_us > target->exit_latency)
+			measured_us -= target->exit_latency;
 
-	/*
-	 * We correct for the exit latency; we are assuming here that the
-	 * exit latency happens after the event that we're interested in.
-	 */
-	if (measured_us > data->exit_us)
-		measured_us -= data->exit_us;
+		/* Make sure our coefficients do not exceed unity */
+		if (measured_us > data->next_timer_us)
+			measured_us = data->next_timer_us;
+	}
 
-
 	/* Update our correction ratio */
 	new_factor = data->correction_factor[data->bucket];
 	new_factor -= new_factor / DECAY;
 
-	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
-		new_factor += RESOLUTION * measured_us / data->expected_us;
+	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
+		new_factor += RESOLUTION * measured_us / data->next_timer_us;
 	else
 		/*
 		 * we were idle so long that we count it as a perfect
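The clamp of measured_us to next_timer_us is what keeps the correction coefficients within domain: each new term contributes at most RESOLUTION, so the decaying average can never exceed RESOLUTION * DECAY. A runnable sketch of the update rule with the kernel's constants; the driving values in main() are made up:

#include <stdio.h>

#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

static unsigned int update_factor(unsigned int factor,
				  unsigned int measured_us,
				  unsigned int next_timer_us)
{
	if (measured_us > next_timer_us)	/* keep the ratio <= 1 */
		measured_us = next_timer_us;

	factor -= factor / DECAY;		/* age the old estimate */
	if (next_timer_us > 0 && measured_us < MAX_INTERESTING)
		factor += RESOLUTION * measured_us / next_timer_us;
	else
		factor += RESOLUTION;		/* very long sleep: count as perfect */
	return factor;
}

int main(void)
{
	unsigned int factor = RESOLUTION * DECAY;	/* initial "unity" value */
	int i;

	/* we keep sleeping only half as long as the timer suggested */
	for (i = 0; i < 20; i++)
		factor = update_factor(factor, 500, 1000);
	printf("factor converges toward %u (unity is %u)\n",
	       factor, RESOLUTION * DECAY);
	return 0;
}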
@@ -439,7 +452,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	data->correction_factor[data->bucket] = new_factor;
 
 	/* update the repeating-pattern data */
-	data->intervals[data->interval_ptr++] = last_idle_us;
+	data->intervals[data->interval_ptr++] = measured_us;
 	if (data->interval_ptr >= INTERVALS)
 		data->interval_ptr = 0;
 }