[ARM] idle: clean up pm_idle calling, obey hlt_counter

pm_idle is used by infrastructure (e.g., cpuidle) which expects architectures
to call it in a certain way.  Arrange for ARM to follow x86's lead on this
and call pm_idle() with interrupts already disabled.  However, we expect
pm_idle() to enable interrupts before it returns.
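
As an illustration of that contract (a minimal sketch, not part of this
patch; the function names are made up), a machine-specific pm_idle hook
now looks something like this, mirroring the new default_idle():

	/* Hypothetical hook: entered by cpu_idle() with IRQs disabled. */
	static void my_machine_idle(void)
	{
		if (!need_resched())
			arch_idle();	/* enter the platform's low power state */

		/* cpu_idle() relies on IRQs being enabled on return. */
		local_irq_enable();
	}

	static void __init my_machine_init(void)
	{
		pm_idle = my_machine_idle;	/* install the hook */
	}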

Also, OMAP wants to be able to disable hlt-ing, so allow hlt_counter to
prevent all calls to pm_idle.
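
For reference, hlt_counter is driven by the existing disable_hlt()/
enable_hlt() helpers; while it is non-zero, the idle loop spins with
cpu_relax() instead of calling pm_idle().  A rough sketch of how a
platform driver might use this (the driver functions are hypothetical):

	/* Keep the CPU out of low power idle while a transfer is in flight. */
	static void my_start_transfer(void)
	{
		disable_hlt();		/* hlt_counter++ */
		/* ... start the hardware ... */
	}

	static void my_transfer_done(void)
	{
		/* ... clean up ... */
		enable_hlt();		/* hlt_counter-- */
	}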

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Author:     Russell King, 2009-06-22 22:34:55 +01:00
Committer:  Russell King
Parent:     915166d96f
Commit:     9ccdac3662

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread.  We try to conserve power, while trying to keep
- * overall latency low.  The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
 
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
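
For completeness, a sketch of how a pm_idle consumer such as cpuidle is
expected to plug in under this convention (the names below are
hypothetical and the saved-handler pattern is only illustrative):

	extern int my_enter_idle_state(void);	/* made-up platform helper */

	static void (*saved_pm_idle)(void);

	/* Installed handler: entered with IRQs disabled. */
	static void my_idle_handler(void)
	{
		if (my_enter_idle_state() < 0) {
			/*
			 * Nothing entered: fall back to the previous handler,
			 * which re-enables IRQs itself.
			 */
			saved_pm_idle();
			return;
		}
		local_irq_enable();	/* obey the pm_idle contract */
	}

	static void my_install_idle_handler(void)
	{
		saved_pm_idle = pm_idle;
		pm_idle = my_idle_handler;
	}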