/*
 * arch/sh/kernel/time_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 *    Original TMU/RTC code taken from sh version.
 *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 *      Some code taken from i386 version.
 *      Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/bcd.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <cpu/registers.h>	 /* required by inline __asm__ stmt. */
#include <cpu/irq.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/clock.h>

#define TMU_TOCR_INIT	0x00
#define TMU0_TCR_INIT	0x0020
#define TMU_TSTR_INIT	1
#define TMU_TSTR_OFF	0
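/*
 * Note on TMU0_TCR_INIT (added for clarity, assuming the SH-5 TMU uses the
 * usual SH TMU.TCR bit layout): 0x0020 sets UNIE (underflow interrupt
 * enable) and leaves TPSC at 0, i.e. the channel counts the peripheral
 * module clock divided by 4; time_init() below therefore computes the
 * reload value as module_clk / (HZ * 4).
 */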

/* Real Time Clock */
#define	RTC_BLOCK_OFF	0x01040000
#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RTC_RCR1	(rtc_base + 0x38)

/* Time Management Unit */
#define	TMU_BLOCK_OFF	0x01020000
#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)

#define TMU_TOCR	tmu_base+0x0	/* Byte access */
#define TMU_TSTR	tmu_base+0x4	/* Byte access */

#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */
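/*
 * For reference, the offsets above expand as follows (straight from the
 * #defines): each TMU channel has 0xc bytes of registers starting at
 * tmu_base + 0x8, so TMU0_TCOR = tmu_base + 0x08, TMU0_TCNT =
 * tmu_base + 0x0c, TMU0_TCR = tmu_base + 0x10, and TMU1_BASE follows at
 * tmu_base + 0x14.
 */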

#define TICK_SIZE (tick_nsec / 1000)

static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;
static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */

#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;

/* Estimate number of microseconds that have elapsed since the last timer tick,
   by scaling the delta that has occurred in the CTC register.

   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
   sleeping, though will be coarser.

   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
   is running or if the freq or tick arguments of adjtimex are modified after
   we have calibrated the scaling factor?  This will result in either a jump at
   the end of a tick period, or a wrap backwards at the start of the next one,
   if the application is reading the time of day often enough.  I think we
   ought to do better than this.  For this reason, usecs_per_jiffy is left
   separated out in the calculation below.  This allows some future hook into
   the adjtime-related stuff in kernel/timer.c to remove this hazard.
*/
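/* Worked example with illustrative numbers only: for a hypothetical
   400 MHz CPU clock and HZ=100, ctc_ticks_per_jiffy is 4,000,000 and
   scaled_recip_ctc_ticks_per_jiffy = 2^40 / 4,000,000 ~= 274877.  A CTC
   delta of 2,000,000 ticks (half a jiffy) then scales to
       (2,000,000 * 10,000 * 274877) >> 40  ~=  5000 usecs,
   i.e. half of the 10 ms tick, as expected. */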

static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

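	/*
	 * Explanatory note: the three mulu.l multiplies below assemble the
	 * product  ticks * usecs_per_jiffy * scaled_recip_ctc_ticks_per_jiffy
	 * from 32x32->64 pieces (assuming the usual SH-5 semantics, where
	 * mulu.l multiplies only the low 32 bits of each operand).  The
	 * 64-bit first product is split into low and high halves, each half
	 * is multiplied by the scaled reciprocal, and the partial products
	 * are recombined before the final shift by CTC_JIFFY_SCALE_SHIFT.
	 */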
	/* Inline assembly to do 32x32->64 multiplies */
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul1_out) :
		      "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_low) :
		      "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_high) :
		      "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}

void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = usecs_since_tick();
		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * usecs_since_tick();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/* Dummy RTC ops */
static void null_rtc_get_time(struct timespec *tv)
{
	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
	tv->tv_nsec = 0;
}

static int null_rtc_set_time(const time_t secs)
{
	return 0;
}

void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
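/*
 * Boards with a working RTC can repoint rtc_sh_get_time/rtc_sh_set_time at
 * their own routines; the null implementations above simply report
 * 2000-01-01 and discard writes.
 */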

/* last time the RTC clock got updated */
static long last_rtc_update;

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
static inline void do_timer_interrupt(void)
{
	unsigned long long current_ctc;

	if (current->pid)
		profile_tick(CPU_PROFILING);

	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
	 * the irq version of write_lock because as just said we have irqs
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the RTC accordingly every ~11 minutes. rtc_sh_set_time() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
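	/*
	 * (Illustrative: with HZ=100, TICK_SIZE is ~10,000 usecs, so the
	 * test below accepts RTC writes landing roughly 495-505 ms into
	 * the second.)
	 */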
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (rtc_sh_set_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* do it again in 60 s */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}

/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	do_timer_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq0  = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "timer",
};

void __init time_init(void)
{
	unsigned long interval;
	struct clk *clk;

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (!tmu_base) {
		panic("Unable to remap TMU\n");
	}

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (!rtc_base) {
		panic("Unable to remap RTC\n");
	}

	clk = clk_get(NULL, "cpu_clk");
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) /
			(unsigned long long)(clk_get_rate(clk) / HZ));

	rtc_sh_get_time(&xtime);

	setup_irq(TIMER_IRQ, &irq0);

	clk = clk_get(NULL, "module_clk");
	interval = (clk_get_rate(clk)/(HZ*4));
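	/*
	 * Illustrative numbers only: a hypothetical 50 MHz module clock with
	 * HZ=100 would give interval = 50,000,000 / (100 * 4) = 125,000 TMU
	 * counts per tick; the /4 is assumed to match the prescaler selected
	 * by TMU0_TCR_INIT (see the note next to that define).
	 */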

	printk("Interval = %ld\n", interval);

	/* Start TMU0 */
	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}

static struct resource rtc_resources[] = {
	[0] = {
		/* RTC base, filled in by rtc_init */
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		/* Period IRQ */
		.start	= IRQ_PRI,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* Carry IRQ */
		.start	= IRQ_CUI,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		/* Alarm IRQ */
		.start	= IRQ_ATI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rtc_device = {
	.name		= "sh-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtc_resources),
	.resource	= rtc_resources,
};

static int __init rtc_init(void)
{
	rtc_resources[0].start	= rtc_base;
	rtc_resources[0].end	= rtc_resources[0].start + 0x58 - 1;

	return platform_device_register(&rtc_device);
}
device_initcall(rtc_init);