/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use khz divisor instead of mhz to keep a better precision, since
 *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
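
/*
 * Worked example of the scaling above (hypothetical numbers, not a real
 * board configuration): with SC = 2^CYC2NS_SCALE_FACTOR = 1024 and a
 * 500 MHz core clock (cpu_khz = 500000),
 *	cyc2ns_scale = 10^6 * 1024 / 500000 = 2048
 *	ns = (cycles * 2048) >> 10 = cycles * 2
 * which matches the 2 ns period of a 500 MHz clock.
 */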

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

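/*
 * Note (assumption, not from the original comments): __bfin_cycles_off and
 * __bfin_cycles_mod appear to be maintained by the cpufreq transition code
 * so that this CYCLES-based clocksource stays continuous when the core
 * clock frequency is scaled.
 */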
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

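/*
 * clocksource_cyc2ns() is (cycles * mult) >> shift, so sched_clock()
 * simply reuses the mult/shift pair programmed into bfin_cs_cycles by
 * bfin_cs_cycles_init() below.
 */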
static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	bfin_cs_cycles.mult =
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

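/*
 * Note (assumption, not in the original comments): this configures GPTMR0
 * as a free-running counter clocked by SCLK -- PWM mode, period counting,
 * output pin disabled, with the period/width set near the 32-bit maximum --
 * so that TIMER0_COUNTER can be read directly as a clocksource.
 */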
void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	bfin_cs_gptimer0.mult =
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id);

static int bfin_timer_set_next_event(unsigned long,
		struct clock_event_device *);

static void bfin_timer_set_mode(enum clock_event_mode,
		struct clock_event_device *);

static struct clock_event_device clockevent_bfin = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
#else
	.name		= "bfin_core_timer",
	.rating		= 350,
	.irq		= IRQ_CORETMR,
#endif
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_timer_set_next_event,
	.set_mode	= bfin_timer_set_mode,
};

static struct irqaction bfin_timer_irq = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "Blackfin GPTimer0",
#else
	.name		= "Blackfin CoreTimer",
#endif
	.flags		= IRQF_DISABLED | IRQF_TIMER |
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= timer_interrupt,
	.dev_id		= &clockevent_bfin,
};

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_timer_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA |
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

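/*
 * Acknowledge the expired timer. On Blackfin gptimers the TIMIL0 status
 * bit is write-1-to-clear (an assumption based on the gptimer API, not
 * stated in the original source), so writing it here clears the latched
 * TIMER0 interrupt before the next tick.
 */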
static void bfin_timer_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_timer_init(void)
{
	disable_gptimers(TIMER0bit);
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
	return get_sclk();
}

#else /* CONFIG_TICKSOURCE_CORETMR */

static int bfin_timer_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_timer_ack(void)
{
}

static void __init bfin_timer_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* program the TSCALE prescaler and clear the period/count registers */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	return get_cclk() / TIME_SCALE;
}

void __init setup_core_timer(void)
{
	bfin_timer_init();
	bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_timer_ack();
	return IRQ_HANDLED;
}

static int __init bfin_clockevent_init(void)
{
	unsigned long timer_clk;

	timer_clk = bfin_clockevent_check();

	bfin_timer_init();

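	/*
	 * clockevent mult is the tick rate in cycles per nanosecond, scaled
	 * by 2^shift. Hypothetical example (not a real board configuration):
	 * for a 100 MHz timer clock, mult = (10^8 << 32) / 10^9 ~= 429496729,
	 * giving max_delta_ns = ((2^32 - 1) << 32) / mult ~= 43 s and
	 * min_delta_ns = (100 << 32) / mult ~= 1 us.
	 */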
	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
	clockevent_bfin.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_bfin);

	return 0;
}

void __init time_init(void)
{
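	/*
	 * (365 * 37 + 9) days: 37 years from 1970 through 2006, nine of them
	 * (1972, 1976, ..., 2004) leap years, i.e. 13514 days = 1167609600 s.
	 */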
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();
	bfin_clockevent_init();
}