mirror of
https://github.com/torvalds/linux.git
6917735e8f
Remove duplicate headers inclusions.

Signed-off-by: Jagadeesh Pagadala <jagdsh.linux@gmail.com>
Reviewed-by: Mukesh Ojha <mojha@codeaurora.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1240 lines
32 KiB
C
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name   = "rtc",
	.rating = 400,
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask   = CLOCKSOURCE_MASK(64),
	.read   = rtc_read,
};

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name   = "timebase",
	.rating = 400,
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask   = CLOCKSOURCE_MASK(64),
	.read   = timebase_read,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
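
/*
 * Worked example (illustrative, not part of the original file): an xsec
 * is 2^-20 of a second, so half a second is xsec = 524288.
 * SCALE_XSEC(524288, 1000) = 524288 * 1000 / 1048576 = 500, i.e. the
 * macro rescales a 0..XSEC_PER_SEC fraction of a second to a 0..max
 * range.  The 32-bit variant gets the same answer via mulhwu:
 * ((524288 << 12) * 1000) >> 32 = 500.
 */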

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as a 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
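
/*
 * Worked example (illustrative, not part of the original file): with a
 * 512 MHz timebase, tb_ticks_per_sec = 512000000, so the division above
 * gives 1e6 * 2^64 / 512e6 = 2^64 / 512 = 2^55, i.e.
 * __cputime_usec_factor = 0x0080000000000000.  Multiplying a tick count
 * by this 0.64 fraction (keeping the high 64 bits of the product) then
 * yields microseconds, e.g. 512 ticks * 2^55 / 2^64 = 1 us.
 */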

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;
	unsigned long save_irq_soft_mask = irq_soft_mask_return();
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	irq_soft_mask_set(IRQS_DISABLED);

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;

	irq_soft_mask_set(save_irq_soft_mask);
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
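
/*
 * Worked example of the apportioning in vtime_delta_scaled() above
 * (illustrative numbers, not from this file): if the timebase shows
 * stime = 300 and utime = 100 ticks over the interval, but the SPURR
 * only advanced by deltascaled = 200, then
 * stime_scaled = 200 * 300 / 400 = 150 and utime_scaled = 50, i.e. the
 * scaled ticks are split in the same 3:1 ratio as the raw ticks.
 */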

static unsigned long vtime_delta(struct task_struct *tsk,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}

void vtime_account_system(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);

	stime -= min(stime, steal_time);
	acct->steal_time += steal_time;

	if ((tsk->flags & PF_VCPU) && !irq_count()) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		if (hardirq_count())
			acct->hardirq_time += stime;
		else if (in_serving_softirq())
			acct->softirq_time += stime;
		else
			acct->stime += stime;

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	spin_begin();
	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
			spin_cpu_relax();
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}
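
/*
 * Note (explanatory, not part of the original file): on 64-bit kernels
 * register r13 holds the address of this CPU's PACA, so the lbz/stb
 * instructions here and below are simply single-instruction byte
 * accesses to local_paca->irq_work_pending at its offset within
 * struct paca_struct.
 */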

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	/*
	 * Non-NMI code running with interrupts disabled will replay
	 * irq_happened before it re-enables interrupts, so set the
	 * decrementer there instead of causing a hardware exception
	 * which would immediately hit the masked interrupt handler
	 * and have the net effect of setting the decrementer in
	 * irq_happened.
	 *
	 * NMI interrupts can not check this when they return, so the
	 * decrementer hardware exception is raised, which will fire
	 * when interrupts are next enabled.
	 *
	 * BookE does not support this yet, it must audit all NMI
	 * interrupt handlers to ensure they call nmi_enter() so this
	 * check would be correct.
	 */
	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
		set_dec(1);
	} else {
		hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_DEC;
	}
	preempt_enable();
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#endif /* 32 vs 64 bit */

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these. We also need to set
	 * decrementers_next_tb to the maximum to make sure __check_irq_replay
	 * doesn't replay the timer interrupt on return, otherwise we'll trap
	 * here infinitely :(
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		*next_tb = ~(u64)0;
		set_dec(decrementer_max);
		return;
	}

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions. When the
	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
	 * 31 bits, which is about 4 seconds on most systems, which gives
	 * the watchdog a chance of catching timer interrupt hard lockups.
	 */
	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
		set_dec(0x7fffffff);
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value.
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();
	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
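
/*
 * Equivalently (explanatory note, not part of the original file): since
 * time_init() arranges that (tb_to_ns_scale / 2^64) * 2^tb_to_ns_shift
 * equals 1e9 / tb_ticks_per_sec, the return statement above computes
 *
 *	ns = (get_tb() - boot_tb) * NSEC_PER_SEC / tb_ticks_per_sec
 *
 * without needing a 64-bit division on this fast path.
 */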

#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available, otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch the host
	 * timebase into the VTB when it takes a guest off the CPU; reading
	 * the VTB would then return the last switched-out guest's VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked,
	 * so it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any
	 * stolen time, and on a host which doesn't do any virtualisation
	 * TB *should* equal VTB, so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
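
/*
 * For example (hypothetical device-tree fragment, not from this file),
 * the call get_freq("timebase-frequency", 1, &ppc_tb_freq) made by
 * generic_calibrate_decr() below would pick up a cpu node such as:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		timebase-frequency = <512000000>;
 *		clock-frequency = <2000000000>;
 *	};
 *
 * while the "ibm,extended-*" variants are read with cells = 2, i.e. as
 * 64-bit values, for frequencies that do not fit in 32 bits.
 */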

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}

void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case the real time clock is set below the EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static notrace u64 rtc_read(struct clocksource *cs)
{
	return (u64)get_rtc();
}

static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt;
	struct clocksource *clock = tk->tkr_mono.clock;
	u32 mult = tk->tkr_mono.mult;
	u32 shift = tk->tkr_mono.shift;
	u64 cycle_last = tk->tkr_mono.cycle_last;
	u64 new_tb_to_xs, new_stamp_xsec;
	u64 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	xt.tv_sec = tk->xtime_sec;
	xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * This computes ((2^20 / 1e9) * mult) >> shift as a
	 * 0.64 fixed-point fraction.
	 * The computation in the else clause below won't overflow
	 * (as long as the timebase frequency is >= 1.049 MHz)
	 * but loses precision because we lose the low bits of the constant
	 * in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
	 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
	 * over a second. (Shift values are usually 22, 23 or 24.)
	 * For high frequency clocks such as the 512MHz timebase clock
	 * on POWER[6789], the mult value is small (e.g. 32768000)
	 * and so we can shift the constant by 16 initially
	 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
	 * remaining shifts after the multiplication, which gives a
	 * more accurate result (e.g. with mult = 32768000, shift = 24,
	 * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
	 */
	if (mult <= 62500000 && clock->shift >= 16)
		new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
	else
		new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);

	/*
	 * Compute the fractional second in units of 2^-32 seconds.
	 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
	 * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
	 * it in units of 2^-32 seconds.
	 * We assume shift <= 32 because clocks_calc_mult_shift()
	 * generates shift values in the range 0 - 32.
	 */
	frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
	do_div(frac_sec, NSEC_PER_SEC);
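
	/*
	 * Worked example (illustrative, not part of the original file): if
	 * the current fractional second is 0.5 s, then
	 * xtime_nsec >> shift = 500000000 ns, so frac_sec becomes
	 * 500000000 * 2^32 / 1e9 = 0x80000000, i.e. 0.5 in 2^-32 units.
	 * The ">> 12" below then converts that to 2^-20 second (xsec) units.
	 */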

	/*
	 * Work out new stamp_xsec value for any legacy users of systemcfg.
	 * stamp_xsec is in units of 2^-20 seconds.
	 */
	new_stamp_xsec = frac_sec >> 12;
	new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables. It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
	 * loops back and reads them again until this criterion is met.
	 */
	vdso_data->tb_orig_stamp = cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
	vdso_data->stamp_xtime = xt;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}

static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD
	 * manually, otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
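
/*
 * For example (illustrative numbers, not part of the original file):
 * if firmware reported ibm,dec-bits = <56>, the formula above would
 * give decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff, while the
 * pre-ISAv3 default stays at the 32-bit signed maximum, 0x7fffffff.
 */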

static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
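
	/*
	 * Worked example (illustrative, not part of the original file):
	 * with tb_ticks_per_sec = 512000000, 1e9 * 2^64 / 512e6 is
	 * 1.953125 * 2^64, so res.result_high starts at 1.  One loop
	 * iteration gives shift = 1 and
	 * scale = 0.9765625 * 2^64 = 0xfa00000000000000, so sched_clock()
	 * computes mulhdu(tb, scale) << 1 = tb * 1.953125 ns per tick.
	 */
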
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If the platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
	of_clk_init(NULL);
#endif
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
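
/*
 * Worked example (illustrative, not part of the original file):
 * div128_by_32(1000000, 0, 512000000, &res), as issued by
 * calc_cputime_factors() for a 512 MHz timebase, divides 1e6 * 2^64 by
 * 512e6.  The quotient is 2^64 / 512 = 2^55, so res.result_high = 0 and
 * res.result_low = 0x0080000000000000.  The routine is ordinary long
 * division in base 2^32, producing one 32-bit digit (w, x, y, z) per
 * step.
 */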

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so.
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif