/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>

/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the division
 * into a shift.
 *
 * We can use a kHz divisor instead of MHz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline unsigned long cyc2ns_scale(unsigned long cpu_khz)
{
	return (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(cycle_t cyc, unsigned long cyc2ns_scale)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
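
/*
 * Worked example of the scaling above (illustrative figures only; the
 * 500 MHz clock is an assumption, not taken from this file): with
 * cpu_khz = 500000, cyc2ns_scale = (1000000 << 10) / 500000 = 2048, so
 * cycles_2_ns(500, 2048) = (500 * 2048) >> 10 = 1000 ns, i.e. 500 cycles
 * at 500 MHz come out as exactly 1 us, as expected.
 */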

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

static unsigned long cycles_cyc2ns_scale;

static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

static struct clocksource bfin_cs_cycles = {
	.name = "bfin_cs_cycles",
	.rating = 400,
	.read = bfin_read_cycles,
	.mask = CLOCKSOURCE_MASK(64),
	.shift = 22,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles), cycles_cyc2ns_scale);
}

static int __init bfin_cs_cycles_init(void)
{
	cycles_cyc2ns_scale = cyc2ns_scale(get_cclk() / 1000);

	bfin_cs_cycles.mult =
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

unsigned long gptimer0_cyc2ns_scale;

void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
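	/*
	 * The -1/-2 below program the maximum 32-bit period (0xFFFFFFFF)
	 * and a pulse width just under it, so the timer free-runs as a
	 * 32-bit counter that can be read as a clocksource.
	 */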
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name = "bfin_cs_gptimer0",
	.rating = 350,
	.read = bfin_read_gptimer0,
	.mask = CLOCKSOURCE_MASK(32),
	.shift = 22,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return cycles_2_ns(bfin_read_TIMER0_COUNTER(), gptimer0_cyc2ns_scale);
}

static int __init bfin_cs_gptimer0_init(void)
{
	gptimer0_cyc2ns_scale = cyc2ns_scale(get_sclk() / 1000);

	setup_gptimer0();

	bfin_cs_gptimer0.mult =
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer the cycles counter, since it has the higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id);

static int bfin_timer_set_next_event(unsigned long,
				     struct clock_event_device *);

static void bfin_timer_set_mode(enum clock_event_mode,
				struct clock_event_device *);

static struct clock_event_device clockevent_bfin = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name = "bfin_gptimer0",
	.rating = 300,
	.irq = IRQ_TIMER0,
#else
	.name = "bfin_core_timer",
	.rating = 350,
	.irq = IRQ_CORETMR,
#endif
	.shift = 32,
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_timer_set_next_event,
	.set_mode = bfin_timer_set_mode,
};

static struct irqaction bfin_timer_irq = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name = "Blackfin GPTimer0",
#else
	.name = "Blackfin CoreTimer",
#endif
	.flags = IRQF_DISABLED | IRQF_TIMER |
		 IRQF_IRQPOLL | IRQF_PERCPU,
	.handler = timer_interrupt,
	.dev_id = &clockevent_bfin,
};

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_timer_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		set_gptimer_config(TIMER0_id,
				   TIMER_OUT_DIS | TIMER_IRQ_ENA |
				   TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id,
				   TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_timer_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_timer_init(void)
{
	disable_gptimers(TIMER0bit);
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
	return get_sclk();
}

#else /* CONFIG_TICKSOURCE_CORETMR */

static int bfin_timer_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
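		/*
		 * For illustration (assumed figures, not from this file):
		 * with CCLK = 500 MHz, HZ = 100 and TIME_SCALE = 1, tcount
		 * below is 500000000 / 100 - 1 = 4999999, giving one tick
		 * every 10 ms once the timer auto-reloads from TPERIOD.
		 */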
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_timer_ack(void)
{
}

static void __init bfin_timer_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/*
	 * Set the TSCALE prescaler so the core timer effectively
	 * counts at CCLK / TIME_SCALE.
	 */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	return get_cclk() / TIME_SCALE;
}

void __init setup_core_timer(void)
{
	bfin_timer_init();
	bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_timer_ack();
	return IRQ_HANDLED;
}

static int __init bfin_clockevent_init(void)
{
	unsigned long timer_clk;

	timer_clk = bfin_clockevent_check();

	bfin_timer_init();

	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
	clockevent_bfin.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_bfin);

	return 0;
}

void __init time_init(void)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts. */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();
	bfin_clockevent_init();
}