/* linux/arch/arm/mach-msm/timer.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/percpu.h>

#include <asm/mach/time.h>
#include <asm/hardware/gic.h>
#include <mach/msm_iomap.h>
#include <mach/irqs.h>
#include <mach/socinfo.h>

#if defined(CONFIG_MSM_SMD)
#include "smd_private.h"
#endif
#include "timer.h"

enum {
	MSM_TIMER_DEBUG_SYNC = 1U << 0,
};
static int msm_timer_debug_mask;
module_param_named(debug_mask, msm_timer_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
	defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) || \
	defined(CONFIG_ARCH_APQ8064) || defined(CONFIG_ARCH_MSM9615)
#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
#else
#define MSM_GPT_BASE MSM_TMR_BASE
#define MSM_DGT_BASE (MSM_TMR_BASE + 0x10)
#endif

#ifdef CONFIG_MSM7X00A_USE_GP_TIMER
	#define DG_TIMER_RATING 100
	#define MSM_GLOBAL_TIMER MSM_CLOCK_GPT
#else
	#define DG_TIMER_RATING 300
	#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT
#endif

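/*
 * On ARMv6 and MSM7x27A targets the DGT count register appears not to be
 * stable at full resolution (note the MSM_CLOCK_FLAGS_UNSTABLE_COUNT
 * handling below for the same targets), so the count is used shifted down
 * by 5 bits there: e.g. 19.2 MHz >> 5 = 600 kHz effective resolution.
 */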
#if defined(CONFIG_CPU_V6) || defined(CONFIG_ARCH_MSM7X27A)
#define MSM_DGT_SHIFT (5)
#else
#define MSM_DGT_SHIFT (0)
#endif

#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
#define TIMER_CLEAR 0x000C
#define DGT_CLK_CTL 0x0034
enum {
	DGT_CLK_CTL_DIV_1 = 0,
	DGT_CLK_CTL_DIV_2 = 1,
	DGT_CLK_CTL_DIV_3 = 2,
	DGT_CLK_CTL_DIV_4 = 3,
};
#define TIMER_ENABLE_EN 1
#define TIMER_ENABLE_CLR_ON_MATCH_EN 2

#define LOCAL_TIMER 0
#define GLOBAL_TIMER 1

/*
 * global_timer_offset is added to the regbase of a timer to force the memory
 * access to come from the CPU0 region.
 */
static int global_timer_offset;

#if defined(CONFIG_MSM_DIRECT_SCLK_ACCESS)
#define MPM_SCLK_COUNT_VAL 0x0024
#endif

#define NR_TIMERS ARRAY_SIZE(msm_clocks)

#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_FSM9XXX)
#define DGT_HZ 4800000	/* Uses TCXO/4 (19.2 MHz / 4) */
#elif defined(CONFIG_ARCH_MSM7X30)
#define DGT_HZ 6144000	/* Uses LPXO/4 (24.576 MHz / 4) */
#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) || \
	defined(CONFIG_ARCH_APQ8064) || defined(CONFIG_ARCH_MSM9615)
/* Uses PXO/4 (24.576 MHz / 4) on V1, (27 MHz / 4) on V2 */
#define DGT_HZ 6750000
#else
#define DGT_HZ 19200000	/* Uses TCXO (19.2 MHz) */
#endif

#define GPT_HZ 32768
#define SCLK_HZ 32768

#if defined(CONFIG_MSM_N_WAY_SMSM)
/* Time Master State Bits */
#define MASTER_BITS_PER_CPU 1
#define MASTER_TIME_PENDING \
	(0x01UL << (MASTER_BITS_PER_CPU * SMSM_APPS_STATE))

/* Time Slave State Bits */
#define SLAVE_TIME_REQUEST 0x0400
#define SLAVE_TIME_POLL 0x0800
#define SLAVE_TIME_INIT 0x1000
#endif

#ifdef CONFIG_SMP
static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt);
#endif
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id);
static cycle_t msm_gpt_read(struct clocksource *cs);
static cycle_t msm_dgt_read(struct clocksource *cs);
static void msm_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt);
static int msm_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt);

enum {
	MSM_CLOCK_FLAGS_UNSTABLE_COUNT = 1U << 0,
	MSM_CLOCK_FLAGS_ODD_MATCH_WRITE = 1U << 1,
	MSM_CLOCK_FLAGS_DELAYED_WRITE_POST = 1U << 2,
};

struct msm_clock {
	struct clock_event_device clockevent;
	struct clocksource clocksource;
	struct irqaction irq;
	void __iomem *regbase;
	uint32_t freq;
	uint32_t shift;
	uint32_t flags;
	uint32_t write_delay;
	uint32_t rollover_offset;
	uint32_t index;
};

enum {
	MSM_CLOCK_GPT,
	MSM_CLOCK_DGT,
};


struct msm_clock_percpu_data {
	uint32_t last_set;
	uint32_t sleep_offset;
	uint32_t alarm_vtime;
	uint32_t alarm;
	uint32_t non_sleep_offset;
	uint32_t in_sync;
	cycle_t stopped_tick;
	int stopped;
	uint32_t last_sync_gpt;
	u64 last_sync_jiffies;
};

struct msm_timer_sync_data_t {
	struct msm_clock *clock;
	uint32_t timeout;
	int exit_sleep;
};

static struct msm_clock msm_clocks[] = {
	[MSM_CLOCK_GPT] = {
		.clockevent = {
			.name = "gp_timer",
			.features = CLOCK_EVT_FEAT_ONESHOT,
			.shift = 32,
			.rating = 200,
			.set_next_event = msm_timer_set_next_event,
			.set_mode = msm_timer_set_mode,
		},
		.clocksource = {
			.name = "gp_timer",
			.rating = 200,
			.read = msm_gpt_read,
			.mask = CLOCKSOURCE_MASK(32),
			.shift = 17,
			.flags = CLOCK_SOURCE_IS_CONTINUOUS,
		},
		.irq = {
			.name = "gp_timer",
			.flags = IRQF_DISABLED | IRQF_TIMER |
				 IRQF_TRIGGER_RISING,
			.handler = msm_timer_interrupt,
			.dev_id = &msm_clocks[0].clockevent,
			.irq = INT_GP_TIMER_EXP
		},
		.regbase = MSM_GPT_BASE,
		.freq = GPT_HZ,
		.index = MSM_CLOCK_GPT,
		.flags =
#if defined(CONFIG_CPU_V6) || defined(CONFIG_ARCH_MSM7X27A)
			MSM_CLOCK_FLAGS_UNSTABLE_COUNT |
			MSM_CLOCK_FLAGS_ODD_MATCH_WRITE |
			MSM_CLOCK_FLAGS_DELAYED_WRITE_POST |
#endif
			0,
		.write_delay = 9,
	},
	[MSM_CLOCK_DGT] = {
		.clockevent = {
			.name = "dg_timer",
			.features = CLOCK_EVT_FEAT_ONESHOT,
			.shift = 32 + MSM_DGT_SHIFT,
			.rating = DG_TIMER_RATING,
			.set_next_event = msm_timer_set_next_event,
			.set_mode = msm_timer_set_mode,
		},
		.clocksource = {
			.name = "dg_timer",
			.rating = DG_TIMER_RATING,
			.read = msm_dgt_read,
			.mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
			.shift = 24 - MSM_DGT_SHIFT,
			.flags = CLOCK_SOURCE_IS_CONTINUOUS,
		},
		.irq = {
			.name = "dg_timer",
			.flags = IRQF_DISABLED | IRQF_TIMER |
				 IRQF_TRIGGER_RISING,
			.handler = msm_timer_interrupt,
			.dev_id = &msm_clocks[1].clockevent,
			.irq = INT_DEBUG_TIMER_EXP
		},
		.regbase = MSM_DGT_BASE,
		.freq = DGT_HZ >> MSM_DGT_SHIFT,
		.index = MSM_CLOCK_DGT,
		.shift = MSM_DGT_SHIFT,
		.write_delay = 9,
	}
};

static DEFINE_PER_CPU(struct clock_event_device *, local_clock_event);

static DEFINE_PER_CPU(struct msm_clock_percpu_data[NR_TIMERS],
		      msm_clocks_percpu);

static DEFINE_PER_CPU(struct msm_clock *, msm_active_clock);

static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	if (smp_processor_id() != 0)
		evt = __get_cpu_var(local_clock_event);
	if (evt->event_handler == NULL)
		return IRQ_HANDLED;
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

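/*
 * Read the current count of a timer.  'global' selects whether the access
 * goes through the CPU0 alias (regbase + global_timer_offset).  On hardware
 * flagged MSM_CLOCK_FLAGS_UNSTABLE_COUNT the register may return transient
 * values, so re-read until two consecutive reads agree, giving up with a
 * warning after ~10 attempts.
 */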
static uint32_t msm_read_timer_count(struct msm_clock *clock, int global)
{
	uint32_t t1, t2;
	int loop_count = 0;

	if (global)
		t1 = __raw_readl(clock->regbase + TIMER_COUNT_VAL +
				 global_timer_offset);
	else
		t1 = __raw_readl(clock->regbase + TIMER_COUNT_VAL);

	if (!(clock->flags & MSM_CLOCK_FLAGS_UNSTABLE_COUNT))
		return t1;
	while (1) {
		if (global)
			t2 = __raw_readl(clock->regbase + TIMER_COUNT_VAL +
					 global_timer_offset);
		else
			t2 = __raw_readl(clock->regbase + TIMER_COUNT_VAL);
		if (t1 == t2)
			return t1;
		if (loop_count++ > 10) {
			printk(KERN_ERR "msm_read_timer_count timer %s did not "
			       "stabilize %u != %u\n", clock->clockevent.name,
			       t2, t1);
			return t2;
		}
		t1 = t2;
	}
}

static cycle_t msm_gpt_read(struct clocksource *cs)
{
	struct msm_clock *clock = &msm_clocks[MSM_CLOCK_GPT];
	struct msm_clock_percpu_data *clock_state =
		&per_cpu(msm_clocks_percpu, 0)[MSM_CLOCK_GPT];

	if (clock_state->stopped)
		return clock_state->stopped_tick;

	return msm_read_timer_count(clock, GLOBAL_TIMER) +
		clock_state->sleep_offset;
}

static cycle_t msm_dgt_read(struct clocksource *cs)
{
	struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT];
	struct msm_clock_percpu_data *clock_state =
		&per_cpu(msm_clocks_percpu, 0)[MSM_CLOCK_DGT];

	if (clock_state->stopped)
		return clock_state->stopped_tick >> MSM_DGT_SHIFT;

	return (msm_read_timer_count(clock, GLOBAL_TIMER) +
		clock_state->sleep_offset) >> MSM_DGT_SHIFT;
}

#ifdef CONFIG_SMP
static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
{
	int i;
	for (i = 0; i < NR_TIMERS; i++)
		if (evt == &(msm_clocks[i].clockevent))
			return &msm_clocks[i];
	return &msm_clocks[MSM_GLOBAL_TIMER];
}
#endif

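/*
 * Program a one-shot event 'cycles' ticks from now by writing the match
 * register, then read the count back: if the alarm already looks like it is
 * in the past (allowing write_delay ticks of slack for the posted write),
 * return -ETIME so the clockevents core retries with a larger delta.
 */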
static int msm_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt)
{
	int i;
	struct msm_clock *clock;
	struct msm_clock_percpu_data *clock_state;
	uint32_t now;
	uint32_t alarm;
	int late;

#ifdef CONFIG_SMP
	clock = clockevent_to_clock(evt);
#else
	clock = container_of(evt, struct msm_clock, clockevent);
#endif
	clock_state = &__get_cpu_var(msm_clocks_percpu)[clock->index];
	if (clock_state->stopped)
		return 0;
	now = msm_read_timer_count(clock, LOCAL_TIMER);
	alarm = now + (cycles << clock->shift);
	if (clock->flags & MSM_CLOCK_FLAGS_ODD_MATCH_WRITE)
		while (now == clock_state->last_set)
			now = msm_read_timer_count(clock, LOCAL_TIMER);

	clock_state->alarm = alarm;
	__raw_writel(alarm, clock->regbase + TIMER_MATCH_VAL);

	if (clock->flags & MSM_CLOCK_FLAGS_DELAYED_WRITE_POST) {
		/* read the counter four extra times to make sure the write
		   posts before reading the time */
		for (i = 0; i < 4; i++)
			__raw_readl(clock->regbase + TIMER_COUNT_VAL);
	}
	now = msm_read_timer_count(clock, LOCAL_TIMER);
	clock_state->last_set = now;
	clock_state->alarm_vtime = alarm + clock_state->sleep_offset;
	late = now - alarm;
	if (late >= (int)(-clock->write_delay << clock->shift) &&
	    late < clock->freq * 5)
		return -ETIME;

	return 0;
}

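/*
 * ONESHOT restarts the timer: sleep_offset is set so the virtual count
 * (hardware count + sleep_offset) continues from stopped_tick, the timer is
 * enabled and unmasked, and the GPT is kept running as the reference clock.
 * SHUTDOWN/UNUSED does the reverse: the current virtual count is saved in
 * stopped_tick and the interrupt is masked.
 */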
static void msm_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	struct msm_clock *clock;
	struct msm_clock_percpu_data *clock_state, *gpt_state;
	unsigned long irq_flags;

#ifdef CONFIG_SMP
	clock = clockevent_to_clock(evt);
#else
	clock = container_of(evt, struct msm_clock, clockevent);
#endif
	clock_state = &__get_cpu_var(msm_clocks_percpu)[clock->index];
	gpt_state = &__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];

	local_irq_save(irq_flags);

	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		clock_state->stopped = 0;
		clock_state->sleep_offset =
			-msm_read_timer_count(clock, LOCAL_TIMER) +
			clock_state->stopped_tick;
		get_cpu_var(msm_active_clock) = clock;
		put_cpu_var(msm_active_clock);
		__raw_writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
		if (irq_get_chip(clock->irq.irq) &&
		    irq_get_chip(clock->irq.irq)->irq_unmask) {
			irq_get_chip(clock->irq.irq)->irq_unmask(
				irq_get_irq_data(clock->irq.irq));
		}
		if (clock != &msm_clocks[MSM_CLOCK_GPT])
			__raw_writel(TIMER_ENABLE_EN,
				msm_clocks[MSM_CLOCK_GPT].regbase +
				TIMER_ENABLE);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		get_cpu_var(msm_active_clock) = NULL;
		put_cpu_var(msm_active_clock);
		clock_state->in_sync = 0;
		clock_state->stopped = 1;
		clock_state->stopped_tick =
			msm_read_timer_count(clock, LOCAL_TIMER) +
			clock_state->sleep_offset;
		__raw_writel(0, clock->regbase + TIMER_MATCH_VAL);
		if (irq_get_chip(clock->irq.irq) &&
		    irq_get_chip(clock->irq.irq)->irq_mask) {
			irq_get_chip(clock->irq.irq)->irq_mask(
				irq_get_irq_data(clock->irq.irq));
		}
#ifdef CONFIG_MSM_SMP
		if (clock != &msm_clocks[MSM_CLOCK_DGT] || smp_processor_id())
#endif
			__raw_writel(0, clock->regbase + TIMER_ENABLE);
		if (clock != &msm_clocks[MSM_CLOCK_GPT]) {
			gpt_state->in_sync = 0;
			__raw_writel(0, msm_clocks[MSM_CLOCK_GPT].regbase +
				TIMER_ENABLE);
		}
		break;
	}
	wmb();
	local_irq_restore(irq_flags);
}

/* Call this after SMP init */
void __iomem *msm_timer_get_timer0_base(void)
{
	return MSM_TMR_BASE + global_timer_offset;
}

#ifdef CONFIG_PM
/*
 * Retrieve the cycle count from sclk and optionally synchronize the local
 * clock with the sclk value.
 *
 * time_start and time_expired are callbacks that must be specified.  The
 * protocol uses them to detect timeout.  The update callback is optional.
 * If not NULL, update will be called so that it can update the local clock.
 *
 * The function does not use the argument data directly; it passes data to
 * the callbacks.
 *
 * Return value:
 *      0: the operation failed
 *      >0: the slow clock value after time-sync
 */
static void (*msm_timer_sync_timeout)(void);
#if defined(CONFIG_MSM_DIRECT_SCLK_ACCESS)
static uint32_t msm_timer_do_sync_to_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
	struct msm_timer_sync_data_t *data)
{
	uint32_t t1, t2;
	int loop_count = 10;
	int loop_zero_count = 3;
	int tmp = USEC_PER_SEC / SCLK_HZ / (loop_zero_count - 1);

	while (loop_zero_count--) {
		t1 = __raw_readl(MSM_RPM_MPM_BASE + MPM_SCLK_COUNT_VAL);
		do {
			udelay(1);
			t2 = t1;
			t1 = __raw_readl(MSM_RPM_MPM_BASE + MPM_SCLK_COUNT_VAL);
		} while ((t2 != t1) && --loop_count);

		if (!loop_count) {
			printk(KERN_EMERG "SCLK did not stabilize\n");
			return 0;
		}

		if (t1)
			break;

		udelay(tmp);
	}

	if (!loop_zero_count) {
		printk(KERN_EMERG "SCLK reads zero\n");
		return 0;
	}

	if (update != NULL)
		update(data, t1, SCLK_HZ);
	return t1;
}
#elif defined(CONFIG_MSM_N_WAY_SMSM)
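/*
 * Slow-clock sync over the SMSM shared-state handshake: wait for the master
 * to go idle, raise SLAVE_TIME_REQUEST, wait for MASTER_TIME_PENDING, switch
 * to SLAVE_TIME_POLL and read the slow-clock value from shared memory, then
 * drop back to SLAVE_TIME_INIT.  A stuck phase calls msm_timer_sync_timeout().
 */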
static uint32_t msm_timer_do_sync_to_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
	struct msm_timer_sync_data_t *data)
{
	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, sizeof(uint32_t));
	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	state = smsm_get_state(SMSM_MODEM_STATE);
	if ((state & SMSM_INIT) == 0) {
		printk(KERN_ERR "smsm not initialized\n");
		return 0;
	}

	time_start(data);
	while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING) {
		if (time_expired(data)) {
			printk(KERN_EMERG "get_smem_clock: timeout 1 still "
				"invalid state %x\n", state);
			msm_timer_sync_timeout();
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
		SLAVE_TIME_REQUEST);

	time_start(data);
	while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING)) {
		if (time_expired(data)) {
			printk(KERN_EMERG "get_smem_clock: timeout 2 still "
				"invalid state %x\n", state);
			msm_timer_sync_timeout();
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);

	time_start(data);
	do {
		smem_clock_val = *smem_clock;
	} while (smem_clock_val == 0 && !time_expired(data));

	state = smsm_get_state(SMSM_TIME_MASTER_DEM);

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val, SCLK_HZ);

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO
				"get_smem_clock: state %x clock %u\n",
				state, smem_clock_val);
	} else {
		printk(KERN_EMERG
			"get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
		msm_timer_sync_timeout();
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
		SLAVE_TIME_INIT);
	return smem_clock_val;
}
#else /* CONFIG_MSM_N_WAY_SMSM */
static uint32_t msm_timer_do_sync_to_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
	struct msm_timer_sync_data_t *data)
{
	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t last_state;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE,
				sizeof(uint32_t));

	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	last_state = state = smsm_get_state(SMSM_MODEM_STATE);
	smem_clock_val = *smem_clock;
	if (smem_clock_val) {
		printk(KERN_INFO "get_smem_clock: invalid start state %x "
			"clock %u\n", state, smem_clock_val);
		smsm_change_state(SMSM_APPS_STATE,
				  SMSM_TIMEWAIT, SMSM_TIMEINIT);

		time_start(data);
		while (*smem_clock != 0 && !time_expired(data))
			;

		smem_clock_val = *smem_clock;
		if (smem_clock_val) {
			printk(KERN_EMERG "get_smem_clock: timeout still "
				"invalid state %x clock %u\n",
				state, smem_clock_val);
			msm_timer_sync_timeout();
		}
	}

	time_start(data);
	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEINIT, SMSM_TIMEWAIT);
	do {
		smem_clock_val = *smem_clock;
		state = smsm_get_state(SMSM_MODEM_STATE);
		if (state != last_state) {
			last_state = state;
			if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
				printk(KERN_INFO
					"get_smem_clock: state %x clock %u\n",
					state, smem_clock_val);
		}
	} while (smem_clock_val == 0 && !time_expired(data));

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val, SCLK_HZ);
	} else {
		printk(KERN_EMERG
			"get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
		msm_timer_sync_timeout();
	}

	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEWAIT, SMSM_TIMEINIT);
	return smem_clock_val;
}
#endif /* CONFIG_MSM_N_WAY_SMSM */

/*
 * Callback function that initializes the timeout value.
 */
static void msm_timer_sync_to_sclk_time_start(
	struct msm_timer_sync_data_t *data)
{
	/* approx 2 seconds */
	uint32_t delta = data->clock->freq << data->clock->shift << 1;
	data->timeout = msm_read_timer_count(data->clock, LOCAL_TIMER) + delta;
}

/*
 * Callback function that checks the timeout.
 */
static bool msm_timer_sync_to_sclk_time_expired(
	struct msm_timer_sync_data_t *data)
{
	uint32_t delta = msm_read_timer_count(data->clock, LOCAL_TIMER) -
		data->timeout;
	return ((int32_t) delta) > 0;
}

/*
 * Callback function that updates the local clock from the specified source
 * clock value and frequency.
 */
static void msm_timer_sync_update(struct msm_timer_sync_data_t *data,
	uint32_t src_clk_val, uint32_t src_clk_freq)
{
	struct msm_clock *dst_clk = data->clock;
	struct msm_clock_percpu_data *dst_clk_state =
		&__get_cpu_var(msm_clocks_percpu)[dst_clk->index];
	uint32_t dst_clk_val = msm_read_timer_count(dst_clk, LOCAL_TIMER);
	uint32_t new_offset;

	if ((dst_clk->freq << dst_clk->shift) == src_clk_freq) {
		new_offset = src_clk_val - dst_clk_val;
	} else {
		uint64_t temp;

		/* separate multiplication and division steps to reduce
		   rounding error */
		temp = src_clk_val;
		temp *= dst_clk->freq << dst_clk->shift;
		do_div(temp, src_clk_freq);

		new_offset = (uint32_t)(temp) - dst_clk_val;
	}

	if (dst_clk_state->sleep_offset + dst_clk_state->non_sleep_offset !=
	    new_offset) {
		if (data->exit_sleep)
			dst_clk_state->sleep_offset =
				new_offset - dst_clk_state->non_sleep_offset;
		else
			dst_clk_state->non_sleep_offset =
				new_offset - dst_clk_state->sleep_offset;

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO "sync clock %s: "
				"src %u, new offset %u + %u\n",
				dst_clk->clocksource.name, src_clk_val,
				dst_clk_state->sleep_offset,
				dst_clk_state->non_sleep_offset);
	}
}

/*
 * Synchronize the GPT clock with sclk.
 */
static void msm_timer_sync_gpt_to_sclk(int exit_sleep)
{
	struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
	struct msm_clock_percpu_data *gpt_clk_state =
		&__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
	struct msm_timer_sync_data_t data;
	uint32_t ret;

	if (gpt_clk_state->in_sync)
		return;

	data.clock = gpt_clk;
	data.timeout = 0;
	data.exit_sleep = exit_sleep;

	ret = msm_timer_do_sync_to_sclk(
		msm_timer_sync_to_sclk_time_start,
		msm_timer_sync_to_sclk_time_expired,
		msm_timer_sync_update,
		&data);

	if (ret)
		gpt_clk_state->in_sync = 1;
}

/*
 * Synchronize the given clock with the GPT clock.
 */
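/*
 * A resync is skipped when the last one is newer than half a GPT rollover
 * period (gpt_period below is the rollover time in jiffies).  On sleep exit,
 * a GPT value lower than last_sync_gpt means the 32-bit GPT wrapped while
 * this clock did not, so rollover_offset (this clock's ticks per GPT wrap,
 * computed in msm_timer_init()) is subtracted once to compensate.
 */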
static void msm_timer_sync_to_gpt(struct msm_clock *clock, int exit_sleep)
{
	struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
	struct msm_clock_percpu_data *gpt_clk_state =
		&__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
	struct msm_clock_percpu_data *clock_state =
		&__get_cpu_var(msm_clocks_percpu)[clock->index];
	struct msm_timer_sync_data_t data;
	uint32_t gpt_clk_val;
	u64 gpt_period = (1ULL << 32) * HZ / GPT_HZ;
	u64 now = get_jiffies_64();

	BUG_ON(clock == gpt_clk);

	if (clock_state->in_sync &&
		(now - clock_state->last_sync_jiffies < (gpt_period >> 1)))
		return;

	gpt_clk_val = msm_read_timer_count(gpt_clk, LOCAL_TIMER)
		+ gpt_clk_state->sleep_offset + gpt_clk_state->non_sleep_offset;

	if (exit_sleep && gpt_clk_val < clock_state->last_sync_gpt)
		clock_state->non_sleep_offset -= clock->rollover_offset;

	data.clock = clock;
	data.timeout = 0;
	data.exit_sleep = exit_sleep;

	msm_timer_sync_update(&data, gpt_clk_val, GPT_HZ);

	clock_state->in_sync = 1;
	clock_state->last_sync_gpt = gpt_clk_val;
	clock_state->last_sync_jiffies = now;
}

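/*
 * Reprogram the pending alarm after the clock offsets have changed, clamping
 * the delta so the match write lands far enough in the future to take
 * effect, and retrying until msm_timer_set_next_event() accepts it.
 */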
static void msm_timer_reactivate_alarm(struct msm_clock *clock)
{
	struct msm_clock_percpu_data *clock_state =
		&__get_cpu_var(msm_clocks_percpu)[clock->index];
	long alarm_delta = clock_state->alarm_vtime -
		clock_state->sleep_offset -
		msm_read_timer_count(clock, LOCAL_TIMER);
	alarm_delta >>= clock->shift;
	if (alarm_delta < (long)clock->write_delay + 4)
		alarm_delta = clock->write_delay + 4;
	while (msm_timer_set_next_event(alarm_delta, &clock->clockevent))
		;
}

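/*
 * Called on idle entry.  Returns the time in nanoseconds until the next
 * programmed alarm, or 0 if it is already due.  The lateness threshold,
 * (freq << shift) >> 10 ticks, is 1/1024 of a second's worth of ticks,
 * i.e. roughly the 1 ms mentioned in the message below.
 */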
int64_t msm_timer_enter_idle(void)
{
	struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
	struct msm_clock *clock = __get_cpu_var(msm_active_clock);
	struct msm_clock_percpu_data *clock_state =
		&__get_cpu_var(msm_clocks_percpu)[clock->index];
	uint32_t alarm;
	uint32_t count;
	int32_t delta;

	BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
		clock != &msm_clocks[MSM_CLOCK_DGT]);

	msm_timer_sync_gpt_to_sclk(0);
	if (clock != gpt_clk)
		msm_timer_sync_to_gpt(clock, 0);

	count = msm_read_timer_count(clock, LOCAL_TIMER);
	if (clock_state->stopped++ == 0)
		clock_state->stopped_tick = count + clock_state->sleep_offset;
	alarm = clock_state->alarm;
	delta = alarm - count;
	if (delta <= -(int32_t)((clock->freq << clock->shift) >> 10)) {
		/* timer should have triggered 1ms ago */
		printk(KERN_ERR "msm_timer_enter_idle: timer late %d, "
			"reprogram it\n", delta);
		msm_timer_reactivate_alarm(clock);
	}
	if (delta <= 0)
		return 0;
	return clocksource_cyc2ns((alarm - count) >> clock->shift,
		clock->clocksource.mult,
		clock->clocksource.shift);
}

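/*
 * Called on idle exit.  After a low-power period the timers may have been
 * stopped, so re-enable them and resynchronize: unconditionally on
 * Scorpion/Krait (where the counters presumably do not survive power
 * collapse), otherwise only when a timer was found disabled.
 */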
void msm_timer_exit_idle(int low_power)
{
	struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
	struct msm_clock *clock = __get_cpu_var(msm_active_clock);
	struct msm_clock_percpu_data *gpt_clk_state =
		&__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
	struct msm_clock_percpu_data *clock_state =
		&__get_cpu_var(msm_clocks_percpu)[clock->index];
	uint32_t enabled;

	BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
		clock != &msm_clocks[MSM_CLOCK_DGT]);

	if (!low_power)
		goto exit_idle_exit;

	enabled = __raw_readl(gpt_clk->regbase + TIMER_ENABLE) &
		TIMER_ENABLE_EN;
	if (!enabled)
		__raw_writel(TIMER_ENABLE_EN, gpt_clk->regbase + TIMER_ENABLE);

#if defined(CONFIG_ARCH_MSM_SCORPION) || defined(CONFIG_ARCH_MSM_KRAIT)
	gpt_clk_state->in_sync = 0;
#else
	gpt_clk_state->in_sync = gpt_clk_state->in_sync && enabled;
#endif
	/* Make sure the timer is actually enabled before we sync it */
	wmb();
	msm_timer_sync_gpt_to_sclk(1);

	if (clock == gpt_clk)
		goto exit_idle_alarm;

	enabled = __raw_readl(clock->regbase + TIMER_ENABLE) & TIMER_ENABLE_EN;
	if (!enabled)
		__raw_writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);

#if defined(CONFIG_ARCH_MSM_SCORPION) || defined(CONFIG_ARCH_MSM_KRAIT)
	clock_state->in_sync = 0;
#else
	clock_state->in_sync = clock_state->in_sync && enabled;
#endif
	/* Make sure the timer is actually enabled before we sync it */
	wmb();
	msm_timer_sync_to_gpt(clock, 1);

exit_idle_alarm:
	msm_timer_reactivate_alarm(clock);

exit_idle_exit:
	clock_state->stopped--;
}

/*
 * Callback function that initializes the timeout value.
 */
static void msm_timer_get_sclk_time_start(
	struct msm_timer_sync_data_t *data)
{
	data->timeout = 200000;
}

/*
 * Callback function that checks the timeout.
 */
static bool msm_timer_get_sclk_time_expired(
	struct msm_timer_sync_data_t *data)
{
	udelay(10);
	return --data->timeout <= 0;
}

/*
 * Retrieve the cycle count from the sclk and convert it into
 * nanoseconds.
 *
 * On exit, if period is not NULL, it contains the period of the
 * sclk in nanoseconds, i.e. how long it takes the cycle count to
 * wrap around.
 *
 * Return value:
 *      0: the operation failed; period is not set either
 *      >0: time in nanoseconds
 */
int64_t msm_timer_get_sclk_time(int64_t *period)
{
	struct msm_timer_sync_data_t data;
	uint32_t clock_value;
	int64_t tmp;

	memset(&data, 0, sizeof(data));
	clock_value = msm_timer_do_sync_to_sclk(
		msm_timer_get_sclk_time_start,
		msm_timer_get_sclk_time_expired,
		NULL,
		&data);

	if (!clock_value)
		return 0;

	if (period) {
		tmp = 1LL << 32;
		tmp = tmp * NSEC_PER_SEC / SCLK_HZ;
		*period = tmp;
	}

	tmp = (int64_t)clock_value;
	tmp = tmp * NSEC_PER_SEC / SCLK_HZ;
	return tmp;
}

int __init msm_timer_init_time_sync(void (*timeout)(void))
{
#if defined(CONFIG_MSM_N_WAY_SMSM) && !defined(CONFIG_MSM_DIRECT_SCLK_ACCESS)
	int ret = smsm_change_intr_mask(SMSM_TIME_MASTER_DEM, 0xFFFFFFFF, 0);

	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	smsm_change_state(SMSM_APPS_DEM,
		SLAVE_TIME_REQUEST | SLAVE_TIME_POLL, SLAVE_TIME_INIT);
#endif

	BUG_ON(timeout == NULL);
	msm_timer_sync_timeout = timeout;

	return 0;
}

#endif

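/*
 * Monotonic scheduler clock on top of the global clocksource: deltas are
 * accumulated into last_ns under a spinlock, and a delta of half the mask
 * or more is treated as the counter having apparently moved backwards and
 * is ignored.
 */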
unsigned long long sched_clock(void)
{
	static cycle_t last_ticks;
	static unsigned long long last_ns;
	static DEFINE_SPINLOCK(msm_timer_sched_clock_lock);

	struct msm_clock *clock;
	struct clocksource *cs;
	cycle_t ticks, delta;
	unsigned long irq_flags;

	clock = &msm_clocks[MSM_GLOBAL_TIMER];
	cs = &clock->clocksource;

	ticks = cs->read(cs);

	spin_lock_irqsave(&msm_timer_sched_clock_lock, irq_flags);
	delta = (ticks - last_ticks) & cs->mask;

	if (delta < cs->mask / 2) {
		last_ticks += delta;
		last_ns += clocksource_cyc2ns(delta, cs->mult, cs->shift);
	}

	ticks = last_ticks;
	spin_unlock_irqrestore(&msm_timer_sched_clock_lock, irq_flags);

	return last_ns;
}

#ifdef CONFIG_MSM_SMP
int read_current_timer(unsigned long *timer_val)
{
	struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
	*timer_val = msm_read_timer_count(dgt, GLOBAL_TIMER);
	return 0;
}
#endif

static void __init msm_timer_init(void)
{
	int i;
	int res;

#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) || \
	defined(CONFIG_ARCH_APQ8064) || defined(CONFIG_ARCH_MSM9615)
	__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
#endif

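	/*
	 * For each timer: disable it, clear the count, and park the match
	 * register at ~0; then precompute rollover_offset, this clock's
	 * ticks per 2^32 GPT ticks (one GPT rollover), truncated to 32 bits,
	 * for use by msm_timer_sync_to_gpt().
	 */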
	for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
		struct msm_clock *clock = &msm_clocks[i];
		struct clock_event_device *ce = &clock->clockevent;
		struct clocksource *cs = &clock->clocksource;
		__raw_writel(0, clock->regbase + TIMER_ENABLE);
		__raw_writel(1, clock->regbase + TIMER_CLEAR);
		__raw_writel(0, clock->regbase + TIMER_COUNT_VAL);
		__raw_writel(~0, clock->regbase + TIMER_MATCH_VAL);

		if ((clock->freq << clock->shift) == GPT_HZ) {
			clock->rollover_offset = 0;
		} else {
			uint64_t temp;

			temp = clock->freq << clock->shift;
			temp <<= 32;
			temp /= GPT_HZ;

			clock->rollover_offset = (uint32_t) temp;
		}

		ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
		/* allow at least 10 seconds to notice that the timer wrapped */
		ce->max_delta_ns =
			clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
		/* the tick count gets rounded down by one */
		ce->min_delta_ns =
			clockevent_delta2ns(clock->write_delay + 4, ce);
		ce->cpumask = cpumask_of(0);

		cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
		res = clocksource_register(cs);
		if (res)
			printk(KERN_ERR "msm_timer_init: clocksource_register "
			       "failed for %s\n", cs->name);

		res = setup_irq(clock->irq.irq, &clock->irq);
		if (res)
			printk(KERN_ERR "msm_timer_init: setup_irq "
			       "failed for %s\n", cs->name);

		irq_get_chip(clock->irq.irq)->irq_mask(irq_get_irq_data(
			clock->irq.irq));

		clockevents_register_device(ce);
	}
#ifdef CONFIG_MSM_SMP
	__raw_writel(1, msm_clocks[MSM_CLOCK_DGT].regbase + TIMER_ENABLE);
	set_delay_fn(read_current_timer_delay_loop);
#endif
}

#ifdef CONFIG_SMP

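/*
 * Set up the global timer as a local tick source on a secondary CPU; CPU0
 * keeps the clock_event registered by msm_timer_init().  The timer registers
 * are banked per CPU, so global_timer_offset is derived here from the
 * MSM_TMR0 (CPU0) alias for readers that need the CPU0 view.
 */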
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	unsigned long flags;
	static bool first_boot = true;
	struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];

	/* Use existing clock_event for cpu 0 */
	if (!smp_processor_id())
		return 0;

	global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
	__raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);

	if (first_boot) {
		__raw_writel(0, clock->regbase + TIMER_ENABLE);
		__raw_writel(0, clock->regbase + TIMER_CLEAR);
		__raw_writel(~0, clock->regbase + TIMER_MATCH_VAL);
		first_boot = false;
	}
	evt->irq = clock->irq.irq;
	evt->name = "local_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = clock->clockevent.rating;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	evt->shift = clock->clockevent.shift;
	evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns =
		clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
	evt->min_delta_ns = clockevent_delta2ns(4, evt);

	__get_cpu_var(local_clock_event) = evt;

	local_irq_save(flags);
	gic_clear_spi_pending(clock->irq.irq);
	local_irq_restore(flags);
	gic_enable_ppi(clock->irq.irq);

	clockevents_register_device(evt);

	return 0;
}

int local_timer_ack(void)
{
	return 1;
}
#endif

struct sys_timer msm_timer = {
	.init = msm_timer_init
};