Diffstat (limited to 'arch/xtensa/kernel/time.c')
-rw-r--r--  arch/xtensa/kernel/time.c | 131
 1 file changed, 26 insertions(+), 105 deletions(-)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 8df1e842f6d4..8848120d291b 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/time.h>
-#include <linux/timex.h>
+#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -25,27 +25,31 @@
#include <asm/timex.h>
#include <asm/platform.h>
-
-DEFINE_SPINLOCK(rtc_lock);
-EXPORT_SYMBOL(rtc_lock);
-
-
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif
-static long last_rtc_update = 0;
-
-/*
- * Scheduler clock - returns current tim in nanosec units.
- */
-
-unsigned long long sched_clock(void)
+static cycle_t ccount_read(void)
{
- return (unsigned long long)jiffies * (1000000000 / HZ);
+ return (cycle_t)get_ccount();
}
+static struct clocksource ccount_clocksource = {
+ .name = "ccount",
+ .rating = 200,
+ .read = ccount_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ /*
+ * With a shift of 22 the lower limit of the cpu clock is
+ * 1MHz, where NSEC_PER_CCOUNT is 1000 or a bit less than
+ * 2^10: Since we have 32 bits and the multiplicator can
+ * already take up as much as 10 bits, this leaves us with
+ * remaining upper 22 bits.
+ */
+ .shift = 22,
+};
+
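The new clocksource only has to return the raw 32-bit ccount; CLOCKSOURCE_MASK(32) tells the generic timekeeping core how wide the counter is, so wrap-around is handled by masked subtraction in common code rather than in this driver. A minimal sketch of that masked-delta idea with illustrative values follows (masked_delta and the numbers are hypothetical, not kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: how a 32-bit mask lets the core compute elapsed
 * cycles across a counter wrap.  delta = (now - last) & mask stays
 * correct even after 'now' wraps past zero, as long as less than one
 * full wrap period elapsed between the two reads.
 */
static uint64_t masked_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* CLOCKSOURCE_MASK(32) */
	uint64_t last = 0xfffffff0ULL;	/* shortly before the wrap */
	uint64_t now  = 0x00000010ULL;	/* shortly after the wrap */

	/* prints 32: 0x10 ticks before the wrap plus 0x10 after it */
	printf("%llu\n", (unsigned long long)masked_delta(now, last, mask));
	return 0;
}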
static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
@@ -55,11 +59,11 @@ static struct irqaction timer_irqaction = {
void __init time_init(void)
{
- time_t sec_o, sec_n = 0;
+ xtime.tv_nsec = 0;
+ xtime.tv_sec = read_persistent_clock();
- /* The platform must provide a function to calibrate the processor
- * speed for the CALIBRATE.
- */
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
@@ -67,19 +71,10 @@ void __init time_init(void)
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
(int)(ccount_per_jiffy/(10000/HZ))%100);
#endif
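The calibration printk keeps floating point out of the kernel by formatting MHz with integer math: ccount_per_jiffy * HZ is the clock in Hz, so dividing by (1000000/HZ) gives whole MHz and dividing by (10000/HZ) modulo 100 gives hundredths. A quick check with hypothetical values (HZ = 100, a 49.5 MHz core) is sketched below:

#include <stdio.h>

/*
 * Illustrative only: the integer-math "X.YY MHz" formatting used by the
 * calibration printk, with hypothetical HZ = 100 and
 * ccount_per_jiffy = 495000 (a 49.5 MHz core).
 */
int main(void)
{
	unsigned long hz_val = 100;
	unsigned long ccount_per_jiffy = 495000;

	/* prints "49.50 MHz" without any floating point */
	printf("%d.%02d MHz\n",
	       (int)(ccount_per_jiffy / (1000000 / hz_val)),
	       (int)(ccount_per_jiffy / (10000 / hz_val)) % 100);
	return 0;
}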
-
- /* Set time from RTC (if provided) */
-
- if (platform_get_rtc_time(&sec_o) == 0)
- while (platform_get_rtc_time(&sec_n))
- if (sec_o != sec_n)
- break;
-
- xtime.tv_nsec = 0;
- last_rtc_update = xtime.tv_sec = sec_n;
-
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ ccount_clocksource.mult =
+ clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ,
+ ccount_clocksource.shift);
+ clocksource_register(&ccount_clocksource);
/* Initialize the linux timer interrupt. */
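The mult value registered here follows the shift comment above: clocksource_hz2mult() computes roughly (NSEC_PER_SEC << shift) / hz, where hz is CCOUNT_PER_JIFFY * HZ, i.e. the ccount frequency. A worked sketch with a hypothetical 100 MHz core (rounding in the real helper is omitted, and the frequency is an assumption, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the mult/shift math used by the generic clocksource layer,
 * assuming a hypothetical 100 MHz ccount and the shift of 22 chosen in
 * this patch.  Not the kernel implementation; rounding is simplified.
 */
int main(void)
{
	uint64_t nsec_per_sec = 1000000000ULL;
	uint64_t hz = 100000000ULL;	/* CCOUNT_PER_JIFFY * HZ, hypothetically */
	uint32_t shift = 22;

	/* roughly what clocksource_hz2mult() returns */
	uint32_t mult = (uint32_t)((nsec_per_sec << shift) / hz);

	/* the core then converts cycles to ns as (cycles * mult) >> shift */
	uint64_t ns = (100ULL * mult) >> shift;

	/* mult = 41943040 (fits in 32 bits); 100 cycles at 100 MHz -> 1000 ns */
	printf("mult=%u ns=%llu\n", mult, (unsigned long long)ns);
	return 0;
}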
@@ -87,69 +82,6 @@ void __init time_init(void)
set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
- unsigned long delta;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
-
- /* This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
-
- delta = CCOUNT_PER_JIFFY;
- delta += get_ccount() - get_linux_timer();
- nsec -= delta * NSEC_PER_CCOUNT;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long volatile sec, usec, delta, seq;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
-
- sec = xtime.tv_sec;
- usec = (xtime.tv_nsec / NSEC_PER_USEC);
-
- delta = get_linux_timer() - get_ccount();
-
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- usec += (((unsigned long) CCOUNT_PER_JIFFY - delta)
- * (unsigned long) NSEC_PER_CCOUNT) / NSEC_PER_USEC;
-
- for (; usec >= 1000000; sec++, usec -= 1000000)
- ;
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
/*
* The timer interrupt is called HZ times per second.
*/
@@ -178,16 +110,6 @@ again:
next += CCOUNT_PER_JIFFY;
set_linux_timer(next);
- if (ntp_synced() &&
- xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ) {
-
- if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
- last_rtc_update = xtime.tv_sec+1;
- else
- /* Do it again in 60 s */
- last_rtc_update += 60;
- }
write_sequnlock(&xtime_lock);
}
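The surviving tick code keeps programming the compare value one jiffy ahead of the free-running ccount; only the periodic RTC write-back is dropped. A minimal sketch of that re-arm pattern follows, with a simulated counter standing in for the real interrupt plumbing (read_counter, program_compare, and the constants are stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_TICK 1000UL		/* stands in for CCOUNT_PER_JIFFY */

/* Simulated free-running counter and compare register, standing in for
 * get_ccount()/set_linux_timer().  Illustrative only. */
static uint32_t counter;
static uint32_t compare;

static uint32_t read_counter(void)        { return counter; }
static void program_compare(uint32_t val) { compare = val; }

/*
 * Re-arm pattern from a tick handler: advance the compare value by one
 * tick period and, if the counter has already raced past it (a late
 * handler), keep advancing so no elapsed tick is lost.  The signed
 * subtraction makes the "already passed?" test wrap-safe.
 */
static unsigned rearm_tick(uint32_t *next)
{
	unsigned ticks = 0;

	do {
		*next += CYCLES_PER_TICK;
		program_compare(*next);
		ticks++;		/* do_timer(1) in the kernel */
	} while ((int32_t)(read_counter() - *next) >= 0);

	return ticks;
}

int main(void)
{
	uint32_t next = 0;

	counter = 3500;			/* pretend the handler ran 3.5 ticks late */
	printf("accounted %u ticks, next compare at %u\n",
	       rearm_tick(&next), compare);	/* 4 ticks, compare = 4000 */
	return 0;
}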
@@ -213,4 +135,3 @@ void __cpuinit calibrate_delay(void)
(loops_per_jiffy/(10000/HZ)) % 100);
}
#endif
-