mirror of
https://github.com/torvalds/linux.git
synced 2024-09-21 07:23:06 +00:00
tick: Shut down low-res tick from dying CPU
The timekeeping duty is handed over from the outgoing CPU within stop machine. This works well if CONFIG_NO_HZ_COMMON=n or the tick is in high-res mode. However in low-res dynticks mode, the tick isn't cancelled until the clockevent is shut down, which can happen later. The tick may therefore fire again once IRQs are re-enabled on stop machine and until IRQs are disabled for good upon the last call to idle. That's so many opportunities for a timekeeper to go idle and the outgoing CPU to take over that duty. This is why tick_nohz_idle_stop_tick() is called one last time on idle if the CPU is seen offline: so that the timekeeping duty is handed over again in case the CPU has re-taken the duty. This means there are two timekeeping handovers on CPU down hotplug with different undocumented constraints and purposes: 1) A handover on stop machine for !dynticks || highres. All online CPUs are guaranteed to be non-idle and the timekeeping duty can be safely handed-over. The hrtimer tick is cancelled so it is guaranteed that in dynticks mode the outgoing CPU won't take the duty again. 2) A handover on last idle call for dynticks && lowres. Setting the duty to TICK_DO_TIMER_NONE makes sure that a CPU will take over the timekeeping. Prepare for consolidating the handover to a single place (the first one) by shutting down the low-res tick from tick_cancel_sched_timer() as well. This will simplify the handover and unify the tick cancellation between high-res and low-res. Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Link: https://lore.kernel.org/r/20240225225508.11587-15-frederic@kernel.org
This commit is contained in:
parent
7988e5ae2b
commit
3f69d04e14
|
@ -410,7 +410,8 @@ int tick_cpu_dying(unsigned int dying_cpu)
|
||||||
if (tick_do_timer_cpu == dying_cpu)
|
if (tick_do_timer_cpu == dying_cpu)
|
||||||
tick_do_timer_cpu = cpumask_first(cpu_online_mask);
|
tick_do_timer_cpu = cpumask_first(cpu_online_mask);
|
||||||
|
|
||||||
tick_cancel_sched_timer(dying_cpu);
|
/* Make sure the CPU won't try to retake the timekeeping duty */
|
||||||
|
tick_sched_timer_dying(dying_cpu);
|
||||||
|
|
||||||
/* Remove CPU from timer broadcasting */
|
/* Remove CPU from timer broadcasting */
|
||||||
tick_offline_cpu(dying_cpu);
|
tick_offline_cpu(dying_cpu);
|
||||||
|
|
|
@ -308,6 +308,14 @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
|
||||||
return HRTIMER_RESTART;
|
return HRTIMER_RESTART;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void tick_sched_timer_cancel(struct tick_sched *ts)
|
||||||
|
{
|
||||||
|
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
|
||||||
|
hrtimer_cancel(&ts->sched_timer);
|
||||||
|
else if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
|
||||||
|
tick_program_event(KTIME_MAX, 1);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_NO_HZ_FULL
|
#ifdef CONFIG_NO_HZ_FULL
|
||||||
cpumask_var_t tick_nohz_full_mask;
|
cpumask_var_t tick_nohz_full_mask;
|
||||||
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
|
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
|
||||||
|
@ -1040,10 +1048,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
|
||||||
* the tick timer.
|
* the tick timer.
|
||||||
*/
|
*/
|
||||||
if (unlikely(expires == KTIME_MAX)) {
|
if (unlikely(expires == KTIME_MAX)) {
|
||||||
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
|
tick_sched_timer_cancel(ts);
|
||||||
hrtimer_cancel(&ts->sched_timer);
|
|
||||||
else
|
|
||||||
tick_program_event(KTIME_MAX, 1);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1598,14 +1603,27 @@ void tick_setup_sched_timer(bool hrtimer)
|
||||||
tick_nohz_activate(ts);
|
tick_nohz_activate(ts);
|
||||||
}
|
}
|
||||||
|
|
||||||
void tick_cancel_sched_timer(int cpu)
|
/*
|
||||||
|
* Shut down the tick and make sure the CPU won't try to retake the timekeeping
|
||||||
|
* duty before disabling IRQs in idle for the last time.
|
||||||
|
*/
|
||||||
|
void tick_sched_timer_dying(int cpu)
|
||||||
{
|
{
|
||||||
|
struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
|
||||||
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
|
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
|
||||||
|
struct clock_event_device *dev = td->evtdev;
|
||||||
ktime_t idle_sleeptime, iowait_sleeptime;
|
ktime_t idle_sleeptime, iowait_sleeptime;
|
||||||
unsigned long idle_calls, idle_sleeps;
|
unsigned long idle_calls, idle_sleeps;
|
||||||
|
|
||||||
if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
|
/* This must happen before hrtimers are migrated! */
|
||||||
hrtimer_cancel(&ts->sched_timer);
|
tick_sched_timer_cancel(ts);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If the clockevents doesn't support CLOCK_EVT_STATE_ONESHOT_STOPPED,
|
||||||
|
* make sure not to call low-res tick handler.
|
||||||
|
*/
|
||||||
|
if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
|
||||||
|
dev->event_handler = clockevents_handle_noop;
|
||||||
|
|
||||||
idle_sleeptime = ts->idle_sleeptime;
|
idle_sleeptime = ts->idle_sleeptime;
|
||||||
iowait_sleeptime = ts->iowait_sleeptime;
|
iowait_sleeptime = ts->iowait_sleeptime;
|
||||||
|
|
|
@ -106,9 +106,9 @@ extern struct tick_sched *tick_get_tick_sched(int cpu);
|
||||||
|
|
||||||
extern void tick_setup_sched_timer(bool hrtimer);
|
extern void tick_setup_sched_timer(bool hrtimer);
|
||||||
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
|
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
|
||||||
extern void tick_cancel_sched_timer(int cpu);
|
extern void tick_sched_timer_dying(int cpu);
|
||||||
#else
|
#else
|
||||||
static inline void tick_cancel_sched_timer(int cpu) { }
|
static inline void tick_sched_timer_dying(int cpu) { }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
|
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
|
||||||
|
|
Loading…
Reference in New Issue
Block a user