/*
 * linux/arch/arm/kernel/arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/export.h>

#include <asm/cputype.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>
#include <asm/hardware/gic.h>
#include <asm/system_info.h>

static unsigned long arch_timer_rate;
static int arch_timer_ppi;
static int arch_timer_ppi2;
static int is_irq_percpu;

static struct clock_event_device __percpu **arch_timer_evt;
static void __iomem *timer_base;

static u32 timer_reg_read_cp15(int reg);
static void timer_reg_write_cp15(int reg, u32 val);
static inline cycle_t counter_get_cntpct_cp15(void);
static inline cycle_t counter_get_cntvct_cp15(void);

static u32 timer_reg_read_mem(int reg);
static void timer_reg_write_mem(int reg, u32 val);
static inline cycle_t counter_get_cntpct_mem(void);
static inline cycle_t counter_get_cntvct_mem(void);

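/*
 * Timer accessors: the timer can be driven either through the CP15
 * system registers or through a memory-mapped (QTIMER) frame.  The
 * CP15 accessors are the default; the registration code switches to
 * the memory-mapped ones when a timer base has been iomapped.
 */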
struct arch_timer_operations {
	void (*reg_write)(int, u32);
	u32 (*reg_read)(int);
	cycle_t (*get_cntpct)(void);
	cycle_t (*get_cntvct)(void);
};

static struct arch_timer_operations arch_timer_ops_cp15 = {
	.reg_read = &timer_reg_read_cp15,
	.reg_write = &timer_reg_write_cp15,
	.get_cntpct = &counter_get_cntpct_cp15,
	.get_cntvct = &counter_get_cntvct_cp15,
};

static struct arch_timer_operations arch_timer_ops_mem = {
	.reg_read = &timer_reg_read_mem,
	.reg_write = &timer_reg_write_mem,
	.get_cntpct = &counter_get_cntpct_mem,
	.get_cntvct = &counter_get_cntvct_mem,
};

static struct arch_timer_operations *arch_specific_timer = &arch_timer_ops_cp15;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_FREQ		1
#define ARCH_TIMER_REG_TVAL		2

/* Iomapped Register Offsets */
#define QTIMER_CNTP_LOW_REG		0x000
#define QTIMER_CNTP_HIGH_REG		0x004
#define QTIMER_CNTV_LOW_REG		0x008
#define QTIMER_CNTV_HIGH_REG		0x00C
#define QTIMER_CTRL_REG			0x02C
#define QTIMER_FREQ_REG			0x010
#define QTIMER_CNTP_TVAL_REG		0x028
#define QTIMER_CNTV_TVAL_REG		0x038

static void timer_reg_write_mem(int reg, u32 val)
{
	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		__raw_writel(val, timer_base + QTIMER_CTRL_REG);
		break;
	case ARCH_TIMER_REG_TVAL:
		__raw_writel(val, timer_base + QTIMER_CNTP_TVAL_REG);
		break;
	}
}

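/* CP15 interface: CNTP_CTL is c14, c2, 1 and CNTP_TVAL is c14, c2, 0. */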
static void timer_reg_write_cp15(int reg, u32 val)
{
	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
		break;
	case ARCH_TIMER_REG_TVAL:
		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
		break;
	}

	isb();
}

static u32 timer_reg_read_mem(int reg)
{
	u32 val;

	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		val = __raw_readl(timer_base + QTIMER_CTRL_REG);
		break;
	case ARCH_TIMER_REG_FREQ:
		val = __raw_readl(timer_base + QTIMER_FREQ_REG);
		break;
	case ARCH_TIMER_REG_TVAL:
		val = __raw_readl(timer_base + QTIMER_CNTP_TVAL_REG);
		break;
	default:
		BUG();
	}

	return val;
}

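/* CP15 interface: CNTP_CTL, CNTFRQ (c14, c0, 0) and CNTP_TVAL reads. */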
static u32 timer_reg_read_cp15(int reg)
{
	u32 val;

	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
		break;
	case ARCH_TIMER_REG_FREQ:
		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
		break;
	case ARCH_TIMER_REG_TVAL:
		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
		break;
	default:
		BUG();
	}

	return val;
}

static irqreturn_t arch_timer_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt;
	unsigned long ctrl;

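	/*
	 * Only claim the interrupt if the timer has actually asserted it
	 * (IT_STAT set); mask it so it stays quiet until the next event
	 * is programmed, then hand off to the clockevent handler.
	 */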
	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL,
						ctrl);
		evt = *__this_cpu_ptr(arch_timer_evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void arch_timer_disable(void)
{
	unsigned long ctrl;

	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}

static void arch_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *clk)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		arch_timer_disable();
		break;
	default:
		break;
	}
}

static int arch_timer_set_next_event(unsigned long evt,
				     struct clock_event_device *unused)
{
	unsigned long ctrl;

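	/*
	 * Disable the timer and clear the interrupt mask while the new
	 * interval is written to TVAL, then re-enable it.
	 */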
	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
	ctrl &= ~(ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK);
	arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
	arch_specific_timer->reg_write(ARCH_TIMER_REG_TVAL, evt);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);

	return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	/* setup clock event only once for CPU 0 */
	if (!smp_processor_id() && clk->irq == arch_timer_ppi)
		return 0;

	/* Be safe... */
	arch_timer_disable();

	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	clk->set_mode = arch_timer_set_mode;
	clk->set_next_event = arch_timer_set_next_event;
	clk->irq = arch_timer_ppi;

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	*__this_cpu_ptr(arch_timer_evt) = clk;

	enable_percpu_irq(clk->irq, 0);
	if (arch_timer_ppi2)
		enable_percpu_irq(arch_timer_ppi2, 0);

	return 0;
}

/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}

static int arch_timer_available(void)
{
	unsigned long freq;

	if (arch_timer_rate == 0) {
		arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, 0);
		freq = arch_specific_timer->reg_read(ARCH_TIMER_REG_FREQ);

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
		pr_info("Architected local timer running at %lu.%02luMHz.\n",
			freq / 1000000, (freq / 10000) % 100);
	}

	return 0;
}

static inline cycle_t counter_get_cntpct_mem(void)
{
	u32 cvall, cvalh, thigh;

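	/*
	 * The 64-bit counter is exposed as two 32-bit registers: re-read
	 * the high word until it is stable across the low-word read, so a
	 * carry between the two halves cannot be observed.
	 */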
	do {
		cvalh = __raw_readl(timer_base + QTIMER_CNTP_HIGH_REG);
		cvall = __raw_readl(timer_base + QTIMER_CNTP_LOW_REG);
		thigh = __raw_readl(timer_base + QTIMER_CNTP_HIGH_REG);
	} while (cvalh != thigh);

	return ((cycle_t) cvalh << 32) | cvall;
}

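/* CNTPCT: 64-bit physical counter, read as a register pair with MRRC p15, 0. */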
static inline cycle_t counter_get_cntpct_cp15(void)
{
	u32 cvall, cvalh;

	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
	return ((cycle_t) cvalh << 32) | cvall;
}

static inline cycle_t counter_get_cntvct_mem(void)
{
	u32 cvall, cvalh, thigh;

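	/* Same high/low/high read sequence as the physical counter above. */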
	do {
		cvalh = __raw_readl(timer_base + QTIMER_CNTV_HIGH_REG);
		cvall = __raw_readl(timer_base + QTIMER_CNTV_LOW_REG);
		thigh = __raw_readl(timer_base + QTIMER_CNTV_HIGH_REG);
	} while (cvalh != thigh);

	return ((cycle_t) cvalh << 32) | cvall;
}

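/* CNTVCT: 64-bit virtual counter, read as a register pair with MRRC p15, 1. */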
static inline cycle_t counter_get_cntvct_cp15(void)
{
	u32 cvall, cvalh;

	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
	return ((cycle_t) cvalh << 32) | cvall;
}

cycle_t arch_counter_get_cntpct(void)
{
	return arch_specific_timer->get_cntpct();
}
EXPORT_SYMBOL(arch_counter_get_cntpct);

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_counter_get_cntpct();
}

#ifdef ARCH_HAS_READ_CURRENT_TIMER
int read_current_timer(unsigned long *timer_val)
{
	*timer_val = (unsigned long)arch_specific_timer->get_cntpct();
	return 0;
}
#endif

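/*
 * The 56-bit mask matches the minimum system counter width the
 * architecture guarantees.
 */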
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static u32 arch_counter_get_cntvct32(void)
{
	cycle_t cntvct;

	cntvct = arch_specific_timer->get_cntvct();

	/*
	 * The sched_clock infrastructure only knows about counters
	 * with at most 32bits. Forget about the upper 24 bits for the
	 * time being...
	 */
	return (u32)(cntvct & (u32)~0);
}

static u32 notrace arch_timer_update_sched_clock(void)
{
	return arch_counter_get_cntvct32();
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());
	disable_percpu_irq(clk->irq);
	if (arch_timer_ppi2)
		disable_percpu_irq(arch_timer_ppi2);
	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static struct local_timer_ops arch_timer_ops __cpuinitdata = {
	.setup	= arch_timer_setup,
	.stop	= arch_timer_stop,
};

static int __init arch_timer_common_register(void)
{
	int err;

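	/*
	 * Use the memory-mapped QTIMER interface when a timer base has
	 * been iomapped; otherwise fall back to the CP15 interface, which
	 * requires the CPU to implement the architected timer.
	 */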
	if (timer_base)
		arch_specific_timer = &arch_timer_ops_mem;
	else if (!local_timer_is_architected())
		return -ENXIO;

	err = arch_timer_available();
	if (err)
		return err;

	arch_timer_evt = alloc_percpu(struct clock_event_device *);
	if (!arch_timer_evt)
		return -ENOMEM;

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);

	setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);

#ifdef ARCH_HAS_READ_CURRENT_TIMER
	set_delay_fn(read_current_timer_delay_loop);
#endif

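	/*
	 * The timer interrupt is normally a per-cpu PPI, but some targets
	 * route it as an ordinary interrupt (see the "irq-is-not-percpu"
	 * DT property); request it accordingly.
	 */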
	if (is_irq_percpu)
		err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
				"arch_timer", arch_timer_evt);
	else
		err = request_irq(arch_timer_ppi, arch_timer_handler, 0,
				"arch_timer", arch_timer_evt);
	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       arch_timer_ppi, err);
		goto out_free;
	}

	if (arch_timer_ppi2) {
		if (is_irq_percpu)
			err = request_percpu_irq(arch_timer_ppi2,
					arch_timer_handler, "arch_timer",
					arch_timer_evt);
		else
			err = request_irq(arch_timer_ppi2, arch_timer_handler,
					0, "arch_timer", arch_timer_evt);
		if (err) {
			pr_err("arch_timer: can't register interrupt %d (%d)\n",
			       arch_timer_ppi2, err);
			arch_timer_ppi2 = 0;
			goto out_free_irq;
		}
	}

	err = local_timer_register(&arch_timer_ops);
	if (err)
		goto out_free_irq;
	percpu_timer_setup();

	return 0;

out_free_irq:
	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
	if (arch_timer_ppi2)
		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);

	return err;
}

int __init arch_timer_register(struct arch_timer *at)
{
	if (at->res[0].start <= 0 || !(at->res[0].flags & IORESOURCE_IRQ))
		return -EINVAL;

	arch_timer_ppi = at->res[0].start;

	if (at->res[1].start > 0 && (at->res[1].flags & IORESOURCE_IRQ))
		arch_timer_ppi2 = at->res[1].start;

	if (at->res[2].start > 0 && at->res[2].end > 0 &&
			(at->res[2].flags & IORESOURCE_MEM))
		timer_base = ioremap(at->res[2].start,
				resource_size(&at->res[2]));

	if (!timer_base) {
		pr_err("arch_timer: can't map timer base\n");
		return -ENOMEM;
	}

	return arch_timer_common_register();
}

#ifdef CONFIG_OF
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{},
};

int __init arch_timer_of_register(void)
{
	struct device_node *np;
	u32 freq;
	int ret;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	ret = irq_of_parse_and_map(np, 0);
	if (ret <= 0) {
		pr_err("arch_timer: interrupt not specified in timer node\n");
		return -ENODEV;
	}

	if (of_get_address(np, 0, NULL, NULL)) {
		timer_base = of_iomap(np, 0);
		if (!timer_base) {
			pr_err("arch_timer: can't map timer base\n");
			return -ENOMEM;
		}
	}

	if (of_get_property(np, "irq-is-not-percpu", NULL))
		is_irq_percpu = 0;
	else
		is_irq_percpu = 1;

	arch_timer_ppi = ret;
	ret = irq_of_parse_and_map(np, 1);
	if (ret > 0)
		arch_timer_ppi2 = ret;
	pr_info("arch_timer: found %s irqs %d %d\n",
		np->name, arch_timer_ppi, arch_timer_ppi2);

	return arch_timer_common_register();
}
#endif