Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 22:21:40 +00:00)
tick: Avoid programming the local cpu timer if broadcast pending
If the local cpu timer stops in deep idle, we arm the broadcast device
and get woken by an IPI. Now when we return from deep idle we reenable
the local cpu timer unconditionally before handling the IPI. But that's
a pointless exercise: the timer is already expired and the IPI is on
the way. And it's an expensive exercise as we use the forced
reprogramming mode so that we do not lose a timer event. This forced
reprogramming will loop at least once in the retry.

To avoid this reprogramming, we mark the cpu in a pending bit mask
before we send the IPI. Now when the IPI target cpu wakes up, it will
see the pending bit set and skip the reprogramming. The reprogramming
of the cpu local timer will happen in the IPI handler which runs the
cpu local timer interrupt function.

Reported-by: Jason Liu <liu.h.jason@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: LAK <linux-arm-kernel@lists.infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Link: http://lkml.kernel.org/r/20130306111537.431082074@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 26517f3e99 (parent f7dce82d53)
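The commit message describes a small handshake: the broadcast cpu sets a
per-cpu pending bit before firing the IPI, and the woken cpu
tests-and-clears that bit to decide whether reprogramming its local timer
is still necessary. The following is a minimal user-space sketch of that
protocol; pending_mask, broadcast_mark_pending() and must_reprogram() are
illustrative stand-ins for tick_broadcast_pending_mask and the kernel
paths, not kernel API.

/* pending_bit_demo.c - build with: gcc -std=c11 pending_bit_demo.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models tick_broadcast_pending_mask as one bit per cpu. */
static atomic_ulong pending_mask;

/* Broadcast side: mark the target cpu pending *before* sending the IPI. */
static void broadcast_mark_pending(int cpu)
{
	atomic_fetch_or(&pending_mask, 1UL << cpu);
	/* the broadcast IPI would be sent here */
}

/*
 * Idle-exit side: returns true if the local timer must be reprogrammed,
 * false if the pending bit says the IPI handler will deal with the
 * already expired event anyway.
 */
static bool must_reprogram(int cpu)
{
	unsigned long bit = 1UL << cpu;

	/* mirrors cpumask_test_and_clear_cpu() on the pending mask */
	return !(atomic_fetch_and(&pending_mask, ~bit) & bit);
}

int main(void)
{
	broadcast_mark_pending(1);
	printf("cpu1 reprogram after IPI: %s\n", must_reprogram(1) ? "yes" : "no");
	printf("cpu1 reprogram, no IPI:   %s\n", must_reprogram(1) ? "yes" : "no");
	return 0;
}

The first call prints "no" because the pending bit absorbs the forced
reprogramming; the second prints "yes", the normal idle-exit path.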
kernel/time/tick-broadcast.c:

@@ -392,6 +392,7 @@ int tick_resume_broadcast(void)
 #ifdef CONFIG_TICK_ONESHOT
 
 static cpumask_var_t tick_broadcast_oneshot_mask;
+static cpumask_var_t tick_broadcast_pending_mask;
 
 /*
  * Exposed for debugging: see timer_list.c
@@ -470,6 +471,12 @@ again:
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64) {
 			cpumask_set_cpu(cpu, tmpmask);
+			/*
+			 * Mark the remote cpu in the pending mask, so
+			 * it can avoid reprogramming the cpu local
+			 * timer in tick_broadcast_oneshot_control().
+			 */
+			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
 		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
 			next_event.tv64 = td->evtdev->next_event.tv64;
 			next_cpu = cpu;
@@ -535,6 +542,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
+		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
 			if (dev->next_event.tv64 < bc->next_event.tv64)
@@ -543,10 +551,25 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-			if (dev->next_event.tv64 != KTIME_MAX)
-				tick_program_event(dev->next_event, 1);
+			if (dev->next_event.tv64 == KTIME_MAX)
+				goto out;
+			/*
+			 * The cpu which was handling the broadcast
+			 * timer marked this cpu in the broadcast
+			 * pending mask and fired the broadcast
+			 * IPI. So we are going to handle the expired
+			 * event anyway via the broadcast IPI
+			 * handler. No need to reprogram the timer
+			 * with an already expired event.
+			 */
+			if (cpumask_test_and_clear_cpu(cpu,
+					       tick_broadcast_pending_mask))
+				goto out;
+
+			tick_program_event(dev->next_event, 1);
 		}
 	}
+out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
@@ -683,5 +706,6 @@ void __init tick_broadcast_init(void)
 	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
 	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
 #endif
 }
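For orientation, this is roughly how an idle path of that era reached
tick_broadcast_oneshot_control(): the driver bracketed deep-idle entry
with clockevents_notify() calls. A sketch only; idle_with_broadcast()
and enter_platform_idle() are hypothetical placeholders, not functions
from this commit.

#include <linux/clockchips.h>
#include <linux/smp.h>

static void enter_platform_idle(void)
{
	/* platform specific: WFI / deep sleep entry (placeholder) */
}

static void idle_with_broadcast(void)
{
	int cpu = smp_processor_id();

	/* Local timer may stop: hand the next event to the broadcast device. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	enter_platform_idle();

	/*
	 * Ends up in tick_broadcast_oneshot_control(), which after this
	 * commit skips the forced reprogramming when our pending bit is set.
	 */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}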