Merge tag 'timers-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timers and timekeeping updates from Thomas Gleixner:
"Core:
- Robustness improvements for the NOHZ tick management
- Fixes and consolidation of the NTP/RTC synchronization code
- Small fixes and improvements in various places
- A set of function documentation updates and fixes
Drivers:
- Cleanups and improvements in various clocksource/event drivers
- Removal of the EZChip NPS clocksource driver as the platform
support was removed from ARC
- The usual set of new device tree bindings and json conversions
- The RTC driver changes which have been acked by the RTC maintainer:
* fix a long standing bug in the MC146818 library code which can
cause reading garbage during the RTC internal update.
* changes related to the NTP/RTC consolidation work"
* tag 'timers-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
ntp: Fix prototype in the !CONFIG_GENERIC_CMOS_UPDATE case
tick/sched: Make jiffies update quick check more robust
ntp: Consolidate the RTC update implementation
ntp: Make the RTC sync offset less obscure
ntp, rtc: Move rtc_set_ntp_time() to ntp code
ntp: Make the RTC synchronization more reliable
rtc: core: Make the sync offset default more realistic
rtc: cmos: Make rtc_cmos sync offset correct
rtc: mc146818: Reduce spinlock section in mc146818_set_time()
rtc: mc146818: Prevent reading garbage
clocksource/drivers/sh_cmt: Fix potential deadlock when calling runtime PM
clocksource/drivers/arm_arch_timer: Correct fault programming of CNTKCTL_EL1.EVNTI
clocksource/drivers/arm_arch_timer: Use stable count reader in erratum sne
clocksource/drivers/dw_apb_timer_of: Add error handling if no clock available
clocksource/drivers/riscv: Make RISCV_TIMER depends on RISCV_SBI
clocksource/drivers/ingenic: Fix section mismatch
clocksource/drivers/cadence_ttc: Fix memory leak in ttc_setup_clockevent()
dt-bindings: timer: renesas: tmu: Convert to json-schema
dt-bindings: timer: renesas: tmu: Document r8a774e1 bindings
clocksource/drivers/orion: Add missing clk_disable_unprepare() on error path
...
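The "reading garbage" problem fixed in the MC146818 library comes from the chip's update cycle: the RTC sets the UIP (update in progress) bit in register A shortly before it rewrites its time registers, and a read that overlaps that window can return torn values. Below is a minimal, hedged sketch of the usual defence of checking UIP before and after the readout and retrying; rtc_read_reg(), the register constants layout and the retry bound are illustrative assumptions, not the kernel's mc146818_get_time() implementation.

/*
 * Illustrative sketch only, not the kernel's mc146818_get_time():
 * rtc_read_reg() is a hypothetical register accessor standing in for a
 * CMOS_READ()-style platform helper, and the retry bound is arbitrary.
 */
#include <stdbool.h>
#include <stdint.h>

#define RTC_REG_A	0x0a
#define RTC_UIP		0x80	/* update-in-progress bit in register A */
#define RTC_SECONDS	0x00
#define RTC_MINUTES	0x02
#define RTC_HOURS	0x04

extern uint8_t rtc_read_reg(uint8_t addr);	/* assumed platform accessor */

/* Read h:m:s from an MC146818-style RTC without racing its update cycle. */
static bool rtc_read_time_consistent(uint8_t *sec, uint8_t *min, uint8_t *hour)
{
	for (int tries = 0; tries < 10; tries++) {
		/* Update imminent or in progress: registers cannot be trusted
		 * right now (a real driver would delay before retrying). */
		if (rtc_read_reg(RTC_REG_A) & RTC_UIP)
			continue;

		*sec  = rtc_read_reg(RTC_SECONDS);
		*min  = rtc_read_reg(RTC_MINUTES);
		*hour = rtc_read_reg(RTC_HOURS);

		/* If no update started while reading, the values are consistent. */
		if (!(rtc_read_reg(RTC_REG_A) & RTC_UIP))
			return true;
	}
	return false;	/* caller should retry later or report an error */
}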
@@ -1284,7 +1284,7 @@ int hrtimer_cancel(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
* hrtimer_get_remaining - get remaining time for the timer
* __hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/

@@ -59,7 +59,8 @@ static struct clocksource clocksource_jiffies = {
};

__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
__cacheline_aligned_in_smp seqcount_t jiffies_seq;
__cacheline_aligned_in_smp seqcount_raw_spinlock_t jiffies_seq =
SEQCNT_RAW_SPINLOCK_ZERO(jiffies_seq, &jiffies_lock);

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -494,65 +494,74 @@ out:
return leap;
}

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);

static void sched_sync_hw_clock(struct timespec64 now,
unsigned long target_nsec, bool fail)
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)

static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
struct timespec64 next;
queue_work(system_power_efficient_wq, &sync_work);

ktime_get_real_ts64(&next);
if (!fail)
next.tv_sec = 659;
else {
/*
* Try again as soon as possible. Delaying long periods
* decreases the accuracy of the work queue timer. Due to this
* the algorithm is very likely to require a short-sleep retry
* after the above long sleep to synchronize ts_nsec.
*/
next.tv_sec = 0;
}

/* Compute the needed delay that will get to tv_nsec == target_nsec */
next.tv_nsec = target_nsec - next.tv_nsec;
if (next.tv_nsec <= 0)
next.tv_nsec += NSEC_PER_SEC;
if (next.tv_nsec >= NSEC_PER_SEC) {
next.tv_sec++;
next.tv_nsec -= NSEC_PER_SEC;
}

queue_delayed_work(system_power_efficient_wq, &sync_work,
timespec64_to_jiffies(&next));
return HRTIMER_NORESTART;
}

static void sync_rtc_clock(void)
static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
unsigned long target_nsec;
struct timespec64 adjust, now;
int rc;
ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
return;
if (retry)
exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
else
exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

ktime_get_real_ts64(&now);
hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}

adjust = now;
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
/*
* Check whether @now is correct versus the required time to update the RTC
* and calculate the value which needs to be written to the RTC so that the
* next seconds increment of the RTC after the write is aligned with the next
* seconds increment of clock REALTIME.
*
* tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds
*
* t2.tv_nsec == 0
* tsched = t2 - set_offset_nsec
* newval = t2 - NSEC_PER_SEC
*
* ==> neval = tsched + set_offset_nsec - NSEC_PER_SEC
*
* As the execution of this code is not guaranteed to happen exactly at
* tsched this allows it to happen within a fuzzy region:
*
* abs(now - tsched) < FUZZ
*
* If @now is not inside the allowed window the function returns false.
*/
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
struct timespec64 *to_set,
const struct timespec64 *now)
{
/* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
struct timespec64 delay = {.tv_sec = -1,
.tv_nsec = set_offset_nsec};

/*
* The current RTC in use will provide the target_nsec it wants to be
* called at, and does rtc_tv_nsec_ok internally.
*/
rc = rtc_set_ntp_time(adjust, &target_nsec);
if (rc == -ENODEV)
return;
*to_set = timespec64_add(*now, delay);

sched_sync_hw_clock(now, target_nsec, rc);
if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
to_set->tv_nsec = 0;
return true;
}

if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
to_set->tv_sec++;
to_set->tv_nsec = 0;
return true;
}
return false;
}

#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -560,48 +569,47 @@ int __weak update_persistent_clock64(struct timespec64 now64)
{
return -ENODEV;
}
#else
static inline int update_persistent_clock64(struct timespec64 now64)
{
return -ENODEV;
}
#endif

static bool sync_cmos_clock(void)
#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
static bool no_cmos;
struct timespec64 now;
struct timespec64 adjust;
int rc = -EPROTO;
long target_nsec = NSEC_PER_SEC / 2;
struct rtc_device *rtc;
struct rtc_time tm;
int err = -ENODEV;

if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
return false;
rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
if (!rtc)
return -ENODEV;

if (no_cmos)
return false;
if (!rtc->ops || !rtc->ops->set_time)
goto out_close;

/*
* Historically update_persistent_clock64() has followed x86
* semantics, which match the MC146818A/etc RTC. This RTC will store
* 'adjust' and then in .5s it will advance once second.
*
* Architectures are strongly encouraged to use rtclib and not
* implement this legacy API.
*/
ktime_get_real_ts64(&now);
if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
rc = update_persistent_clock64(adjust);
/*
* The machine does not support update_persistent_clock64 even
* though it defines CONFIG_GENERIC_CMOS_UPDATE.
*/
if (rc == -ENODEV) {
no_cmos = true;
return false;
}
/* First call might not have the correct offset */
if (*offset_nsec == rtc->set_offset_nsec) {
rtc_time64_to_tm(to_set->tv_sec, &tm);
err = rtc_set_time(rtc, &tm);
} else {
/* Store the update offset and let the caller try again */
*offset_nsec = rtc->set_offset_nsec;
err = -EAGAIN;
}

sched_sync_hw_clock(now, target_nsec, rc);
return true;
out_close:
rtc_class_close(rtc);
return err;
}
#else
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
return -ENODEV;
}
#endif

/*
* If we have an externally synchronized Linux clock, then update RTC clock
@@ -613,25 +621,65 @@ static bool sync_cmos_clock(void)
*/
static void sync_hw_clock(struct work_struct *work)
{
if (!ntp_synced())
/*
* The default synchronization offset is 500ms for the deprecated
* update_persistent_clock64() under the assumption that it uses
* the infamous CMOS clock (MC146818).
*/
static unsigned long offset_nsec = NSEC_PER_SEC / 2;
struct timespec64 now, to_set;
int res = -EAGAIN;

/*
* Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
* managed to schedule the work between the timer firing and the
* work being able to rearm the timer. Wait for the timer to expire.
*/
if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
return;

if (sync_cmos_clock())
return;
ktime_get_real_ts64(&now);
/* If @now is not in the allowed window, try again */
if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
goto rearm;

sync_rtc_clock();
/* Take timezone adjusted RTCs into account */
if (persistent_clock_is_local)
to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

/* Try the legacy RTC first. */
res = update_persistent_clock64(to_set);
if (res != -ENODEV)
goto rearm;

/* Try the RTC class */
res = update_rtc(&to_set, &offset_nsec);
if (res == -ENODEV)
return;
rearm:
sched_sync_hw_clock(offset_nsec, res != 0);
}

void ntp_notify_cmos_timer(void)
{
if (!ntp_synced())
return;

if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
IS_ENABLED(CONFIG_RTC_SYSTOHC))
queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
/*
* When the work is currently executed but has not yet the timer
* rearmed this queues the work immediately again. No big issue,
* just a pointless work scheduled.
*/
if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
queue_work(system_power_efficient_wq, &sync_work);
}

static void __init ntp_init_cmos_sync(void)
{
hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
sync_hrtimer.function = sync_timer_callback;
}
#else /* CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* !CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */

/*
* Propagate a new txc->status value into the NTP state:
*/
@@ -1044,4 +1092,5 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
void __init ntp_init(void)
{
ntp_clear();
ntp_init_cmos_sync();
}
@@ -12,4 +12,11 @@ extern int __do_adjtimex(struct __kernel_timex *txc,
const struct timespec64 *ts,
s32 *time_tai, struct audit_ntp_data *ad);
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
extern void ntp_notify_cmos_timer(void);
#else
static inline void ntp_notify_cmos_timer(void) { }
#endif

#endif /* _LINUX_NTP_INTERNAL_H */
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();

if (clockevent_state_oneshot(dev)) {
ktime_t next = ktime_add(dev->next_event, tick_period);
ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);

clockevents_program_event(dev, next, true);
}
@@ -877,6 +877,22 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
}
}

static inline ktime_t tick_get_next_period(void)
{
ktime_t next;

/*
* Protect against concurrent updates (store /load tearing on
* 32bit). It does not matter if the time is already in the
* past. The broadcast device which is about to be programmed will
* fire in any case.
*/
raw_spin_lock(&jiffies_lock);
next = tick_next_period;
raw_spin_unlock(&jiffies_lock);
return next;
}

/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
@@ -905,10 +921,11 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
tick_broadcast_oneshot_mask, tmpmask);

if (was_periodic && !cpumask_empty(tmpmask)) {
ktime_t nextevt = tick_get_next_period();

clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
tick_broadcast_init_next_event(tmpmask,
tick_next_period);
tick_broadcast_set_event(bc, cpu, tick_next_period);
tick_broadcast_init_next_event(tmpmask, nextevt);
tick_broadcast_set_event(bc, cpu, nextevt);
} else
bc->next_event = KTIME_MAX;
} else {
@@ -27,10 +27,11 @@
*/
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
* Tick next event: keeps track of the tick time
* Tick next event: keeps track of the tick time. It's updated by the
* CPU which handles the tick and protected by jiffies_lock. There is
* no requirement to write hold the jiffies seqcount for it.
*/
ktime_t tick_next_period;
ktime_t tick_period;

/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -88,7 +89,7 @@ static void tick_periodic(int cpu)
write_seqcount_begin(&jiffies_seq);

/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

do_timer(1);
write_seqcount_end(&jiffies_seq);
@@ -127,7 +128,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
next = ktime_add(next, tick_period);
next = ktime_add_ns(next, TICK_NSEC);

if (!clockevents_program_event(dev, next, false))
return;
@@ -173,7 +174,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
next = ktime_add(next, tick_period);
next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -220,7 +221,6 @@ static void tick_setup_device(struct tick_device *td,
tick_do_timer_cpu = cpu;

tick_next_period = ktime_get();
tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
@@ -15,7 +15,6 @@

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;

extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
@@ -20,6 +20,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
@@ -44,7 +45,9 @@ struct tick_sched *tick_get_tick_sched(int cpu)

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
* The time, when the last jiffy update happened. Protected by jiffies_lock.
* The time, when the last jiffy update happened. Write access must hold
* jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
* consistent view of jiffies and last_jiffies_update.
*/
static ktime_t last_jiffies_update;
@@ -53,50 +56,97 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
unsigned long ticks = 0;
ktime_t delta;
unsigned long ticks = 1;
ktime_t delta, nextp;

/*
* Do a quick check without holding jiffies_lock:
* The READ_ONCE() pairs with two updates done later in this function.
* 64bit can do a quick check without holding jiffies lock and
* without looking at the sequence count. The smp_load_acquire()
* pairs with the update done later in this function.
*
* 32bit cannot do that because the store of tick_next_period
* consists of two 32bit stores and the first store could move it
* to a random point in the future.
*/
delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
if (delta < tick_period)
return;

/* Reevaluate with jiffies_lock held */
raw_spin_lock(&jiffies_lock);
write_seqcount_begin(&jiffies_seq);

delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {

delta = ktime_sub(delta, tick_period);
/* Pairs with the lockless read in this function. */
WRITE_ONCE(last_jiffies_update,
ktime_add(last_jiffies_update, tick_period));

/* Slow path for long timeouts */
if (unlikely(delta >= tick_period)) {
s64 incr = ktime_to_ns(tick_period);

ticks = ktime_divns(delta, incr);

/* Pairs with the lockless read in this function. */
WRITE_ONCE(last_jiffies_update,
ktime_add_ns(last_jiffies_update,
incr * ticks));
}
do_timer(++ticks);

/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
if (IS_ENABLED(CONFIG_64BIT)) {
if (ktime_before(now, smp_load_acquire(&tick_next_period)))
return;
} else {
write_seqcount_end(&jiffies_seq);
unsigned int seq;

/*
* Avoid contention on jiffies_lock and protect the quick
* check with the sequence count.
*/
do {
seq = read_seqcount_begin(&jiffies_seq);
nextp = tick_next_period;
} while (read_seqcount_retry(&jiffies_seq, seq));

if (ktime_before(now, nextp))
return;
}

/* Quick check failed, i.e. update is required. */
raw_spin_lock(&jiffies_lock);
/*
* Reevaluate with the lock held. Another CPU might have done the
* update already.
*/
if (ktime_before(now, tick_next_period)) {
raw_spin_unlock(&jiffies_lock);
return;
}

write_seqcount_begin(&jiffies_seq);

delta = ktime_sub(now, tick_next_period);
if (unlikely(delta >= TICK_NSEC)) {
/* Slow path for long idle sleep times */
s64 incr = TICK_NSEC;

ticks += ktime_divns(delta, incr);

last_jiffies_update = ktime_add_ns(last_jiffies_update,
incr * ticks);
} else {
last_jiffies_update = ktime_add_ns(last_jiffies_update,
TICK_NSEC);
}

/* Advance jiffies to complete the jiffies_seq protected job */
jiffies_64 += ticks;

/*
* Keep the tick_next_period variable up to date.
*/
nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

if (IS_ENABLED(CONFIG_64BIT)) {
/*
* Pairs with smp_load_acquire() in the lockless quick
* check above and ensures that the update to jiffies_64 is
* not reordered vs. the store to tick_next_period, neither
* by the compiler nor by the CPU.
*/
smp_store_release(&tick_next_period, nextp);
} else {
/*
* A plain store is good enough on 32bit as the quick check
* above is protected by the sequence count.
*/
tick_next_period = nextp;
}

/*
* Release the sequence count. calc_global_load() below is not
* protected by it, but jiffies_lock needs to be held to prevent
* concurrent invocations.
*/
write_seqcount_end(&jiffies_seq);

calc_global_load();

raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -661,7 +711,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

/* Forward the time to expire in the future */
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -1230,7 +1280,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;

hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

@@ -1267,7 +1317,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();

hrtimer_set_expires(&ts->sched_timer, next);
hrtimer_forward_now(&ts->sched_timer, tick_period);
hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1333,7 +1383,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;

hrtimer_forward(timer, now, tick_period);
hrtimer_forward(timer, now, TICK_NSEC);

return HRTIMER_RESTART;
}
@@ -1367,13 +1417,13 @@ void tick_setup_sched_timer(void)

/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
u64 offset = ktime_to_ns(tick_period) >> 1;
u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}

hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
@@ -70,10 +70,10 @@ static const unsigned short __mon_yday[2][13] = {
/**
* time64_to_tm - converts the calendar time to local broken-down time
*
* @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
* @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
* Coordinated Universal Time (UTC).
* @offset offset seconds adding to totalsecs.
* @result pointer to struct tm variable to receive broken-down time
* @offset: offset seconds adding to totalsecs.
* @result: pointer to struct tm variable to receive broken-down time
*/
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
@@ -407,6 +407,7 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c
/**
* update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
* @tkr: Timekeeping readout base from which we take the update
* @tkf: Pointer to NMI safe timekeeper
*
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
@@ -436,6 +437,27 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
memcpy(base + 1, base, sizeof(*base));
}

static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
u64 now;

do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);

now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_latch_retry(&tkf->seq, seq));

return now;
}

/**
* ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
*
@@ -462,39 +484,24 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
*
* So reader 6 will observe time going backwards versus reader 5.
*
* While other CPUs are likely to be able observe that, the only way
* While other CPUs are likely to be able to observe that, the only way
* for a CPU local observation is when an NMI hits in the middle of
* the update. Timestamps taken from that NMI context might be ahead
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
u64 now;

do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);

now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_latch_retry(&tkf->seq, seq));

return now;
}

u64 ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

/**
* ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
*
* Contrary to ktime_get_mono_fast_ns() this is always correct because the
* conversion factor is not affected by NTP/PTP correction.
*/
u64 ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
@@ -521,6 +528,9 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*
* The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
* apply as well.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
@@ -530,9 +540,6 @@ u64 notrace ktime_get_boot_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/*
* See comment for __ktime_get_fast_ns() vs. timestamp ordering
*/
static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
{
struct tk_read_base *tkr;
@@ -557,6 +564,8 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)

/**
* ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
*
* See ktime_get_fast_ns() for documentation of the time stamp ordering.
*/
u64 ktime_get_real_fast_ns(void)
{
@@ -654,6 +663,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)

/**
* pvclock_gtod_register_notifier - register a pvclock timedata update listener
* @nb: Pointer to the notifier block to register
*/
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
@@ -673,6 +683,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
* pvclock_gtod_unregister_notifier - unregister a pvclock
* timedata update listener
* @nb: Pointer to the notifier block to unregister
*/
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
@@ -763,6 +774,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)

/**
* timekeeping_forward_now - update clock to the current time
* @tk: Pointer to the timekeeper to update
*
* Forward the current clock to update its state since the last call to
* update_wall_time(). This is useful before significant clock changes,
@@ -1339,7 +1351,7 @@ EXPORT_SYMBOL(do_settimeofday64);

/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
* @tv: pointer to the timespec variable containing the offset
* @ts: Pointer to the timespec variable containing the offset
*
* Adds or subtracts an offset value from the current time.
*/
@@ -1415,9 +1427,8 @@ void timekeeping_warp_clock(void)
}
}

/**
/*
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
*
*/
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
@@ -1425,7 +1436,7 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
/*
* change_clocksource - Swaps clocksources if a new one is available
*
* Accumulates current time interval and initializes new clocksource
@@ -1548,6 +1559,7 @@ u64 timekeeping_max_deferment(void)

/**
* read_persistent_clock64 - Return time from the persistent clock.
* @ts: Pointer to the storage for the readout value
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
@@ -1566,8 +1578,9 @@ void __weak read_persistent_clock64(struct timespec64 *ts)
* from the boot.
*
* Weak dummy function for arches that do not yet support it.
* wall_time - current time as returned by persistent clock
* boot_offset - offset that is defined as wall_time - boot_time
* @wall_time: - current time as returned by persistent clock
* @boot_offset: - offset that is defined as wall_time - boot_time
*
* The default function calculates offset based on the current value of
* local_clock(). This way architectures that support sched_clock() but don't
* support dedicated boot time clock will provide the best estimate of the
@@ -1652,7 +1665,8 @@ static struct timespec64 timekeeping_suspend_time;

/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
* @tk: Pointer to the timekeeper to be updated
* @delta: Pointer to the delta value in timespec64 format
*
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
@@ -2023,13 +2037,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
}

/**
/*
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
* Helper function that accumulates the nsecs greater than a second
* from the xtime_nsec field to the xtime_secs field.
* It also calls into the NTP code to handle leapsecond processing.
*
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
@@ -2071,7 +2084,7 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
return clock_set;
}

/**
/*
* logarithmic_accumulation - shifted accumulation of cycles
*
* This functions accumulates a shifted interval of cycles into
@@ -2314,7 +2327,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
return base;
}

/**
/*
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
static int timekeeping_validate_timex(const struct __kernel_timex *txc)
@@ -26,7 +26,7 @@ extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);

extern raw_spinlock_t jiffies_lock;
extern seqcount_t jiffies_seq;
extern seqcount_raw_spinlock_t jiffies_seq;

#define CS_NAME_LEN 32
@@ -1283,7 +1283,7 @@ static void del_timer_wait_running(struct timer_list *timer)
u32 tf;

tf = READ_ONCE(timer->flags);
if (!(tf & TIMER_MIGRATING)) {
if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
struct timer_base *base = get_timer_base(tf);

/*
@@ -1367,6 +1367,13 @@ int del_timer_sync(struct timer_list *timer)
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));

/*
* Must be able to sleep on PREEMPT_RT because of the slowpath in
* del_timer_wait_running().
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
lockdep_assert_preemption_enabled();

do {
ret = try_to_del_timer_sync(timer);
@@ -1693,29 +1700,6 @@ void timer_clear_idle(void)
}
#endif

/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
*/
void update_process_times(int user_tick)
{
struct task_struct *p = current;

PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);

/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();
#endif
scheduler_tick();
if (IS_ENABLED(CONFIG_POSIX_TIMERS))
run_posix_cpu_timers();
}

/**
* __run_timers - run all expired timers (if any) on this CPU.
* @base: the timer vector to be processed.
@@ -1765,7 +1749,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
/*
* Called by the local, per-CPU timer interrupt on SMP.
*/
void run_local_timers(void)
static void run_local_timers(void)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

@@ -1782,6 +1766,29 @@ void run_local_timers(void)
raise_softirq(TIMER_SOFTIRQ);
}

/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
*/
void update_process_times(int user_tick)
{
struct task_struct *p = current;

PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);

/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();
#endif
scheduler_tick();
if (IS_ENABLED(CONFIG_POSIX_TIMERS))
run_posix_cpu_timers();
}

/*
* Since schedule_timeout()'s timer is defined on the stack, it must store
* the target task on the stack as well.
@@ -42,24 +42,11 @@ static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
va_end(args);
}

static void print_name_offset(struct seq_file *m, void *sym)
{
char symname[KSYM_NAME_LEN];

if (lookup_symbol_name((unsigned long)sym, symname) < 0)
SEQ_printf(m, "<%pK>", sym);
else
SEQ_printf(m, "%s", symname);
}

static void
print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
int idx, u64 now)
{
SEQ_printf(m, " #%d: ", idx);
print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function);
SEQ_printf(m, ", S:%02x", timer->state);
SEQ_printf(m, "\n");
SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
@@ -116,9 +103,7 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)

SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution);

SEQ_printf(m, " .get_time: ");
print_name_offset(m, base->get_time);
SEQ_printf(m, "\n");
SEQ_printf(m, " .get_time: %ps\n", base->get_time);
#ifdef CONFIG_HIGH_RES_TIMERS
SEQ_printf(m, " .offset: %Lu nsecs\n",
(unsigned long long) ktime_to_ns(base->offset));
@@ -218,42 +203,29 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));

SEQ_printf(m, " set_next_event: ");
print_name_offset(m, dev->set_next_event);
SEQ_printf(m, "\n");
SEQ_printf(m, " set_next_event: %ps\n", dev->set_next_event);

if (dev->set_state_shutdown) {
SEQ_printf(m, " shutdown: ");
print_name_offset(m, dev->set_state_shutdown);
SEQ_printf(m, "\n");
}
if (dev->set_state_shutdown)
SEQ_printf(m, " shutdown: %ps\n",
dev->set_state_shutdown);

if (dev->set_state_periodic) {
SEQ_printf(m, " periodic: ");
print_name_offset(m, dev->set_state_periodic);
SEQ_printf(m, "\n");
}
if (dev->set_state_periodic)
SEQ_printf(m, " periodic: %ps\n",
dev->set_state_periodic);

if (dev->set_state_oneshot) {
SEQ_printf(m, " oneshot: ");
print_name_offset(m, dev->set_state_oneshot);
SEQ_printf(m, "\n");
}
if (dev->set_state_oneshot)
SEQ_printf(m, " oneshot: %ps\n",
dev->set_state_oneshot);

if (dev->set_state_oneshot_stopped) {
SEQ_printf(m, " oneshot stopped: ");
print_name_offset(m, dev->set_state_oneshot_stopped);
SEQ_printf(m, "\n");
}
if (dev->set_state_oneshot_stopped)
SEQ_printf(m, " oneshot stopped: %ps\n",
dev->set_state_oneshot_stopped);

if (dev->tick_resume) {
SEQ_printf(m, " resume: ");
print_name_offset(m, dev->tick_resume);
SEQ_printf(m, "\n");
}
if (dev->tick_resume)
SEQ_printf(m, " resume: %ps\n",
dev->tick_resume);

SEQ_printf(m, " event_handler: ");
print_name_offset(m, dev->event_handler);
SEQ_printf(m, " event_handler: %ps\n", dev->event_handler);
SEQ_printf(m, "\n");
SEQ_printf(m, " retries: %lu\n", dev->retries);
SEQ_printf(m, "\n");