ARM: local timers: Add A15 architected timer support
Add support for the A15 generic timer and clocksource.
As the timer generates interrupts on a different PPI depending
on the execution mode (normal or secure), it is possible to
register two different PPIs.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Conflicts:
[Integrated with the recent patch that changes the
local timer registration mechanism.
This fixes the crash seen during hotplug operations:
after a secondary CPU was brought back online, the
clock event device setup happened as part of the
online notification mechanism, which was too late.
With this change to the local timer mechanism, the
clock event device is now set up as part of the
secondary CPU boot initialization, making it
available early enough for use.
Also updated the board file with the corresponding
change to the timer registration argument.]
arch/arm/Kconfig
arch/arm/include/asm/arch_timer.h
arch/arm/kernel/arch_timer.c
Change-Id: I0bc80097c145fb2aac2150db0c5dff3c5e215a58
Signed-off-by: Sathish Ambley <sambley@codeaurora.org>
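With the new registration argument, a board file passes a struct arch_timer
describing the PPI resources rather than a bare resource array. A minimal
sketch of such a registration for an illustrative machine follows; the
structure layout is inferred from the arch_timer_register() usage in the
diff below, and the PPI numbers and function names are placeholders, not
taken from this patch:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <asm/arch_timer.h>

/* Illustrative sketch only: PPI numbers and the init hook are assumptions. */
static struct arch_timer example_arch_timer __initdata = {
	.res = {
		{
			.start = 2,	/* primary timer PPI (placeholder) */
			.flags = IORESOURCE_IRQ,
		},
		{
			.start = 3,	/* optional second PPI for the other
					 * execution mode (placeholder) */
			.flags = IORESOURCE_IRQ,
		},
	},
};

static void __init example_timer_init(void)
{
	if (arch_timer_register(&example_arch_timer))
		pr_err("arch_timer: registration failed\n");
}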
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 5070470..c6ff17c 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -22,15 +22,16 @@
#include <linux/irq.h>
#include <asm/cputype.h>
+#include <asm/localtimer.h>
+#include <asm/arch_timer.h>
#include <asm/sched_clock.h>
-#include <asm/hardware/gic.h>
static unsigned long arch_timer_rate;
static int arch_timer_ppi;
static int arch_timer_ppi2;
static DEFINE_CLOCK_DATA(cd);
-static struct clock_event_device __percpu *arch_timer_evt;
+static struct clock_event_device __percpu **arch_timer_evt;
/*
* Architected system timer support.
@@ -38,6 +39,7 @@
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
+#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
#define ARCH_TIMER_REG_CTRL 0
#define ARCH_TIMER_REG_FREQ 1
@@ -84,10 +86,10 @@
unsigned long ctrl;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- if (ctrl & 0x4) {
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- evt = per_cpu_ptr(arch_timer_evt, smp_processor_id());
+ evt = *__this_cpu_ptr(arch_timer_evt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -95,7 +97,7 @@
return IRQ_NONE;
}
-static void arch_timer_stop(void)
+static void arch_timer_disable(void)
{
unsigned long ctrl;
@@ -110,7 +112,7 @@
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_stop();
+ arch_timer_disable();
break;
default:
break;
@@ -132,12 +134,14 @@
return 0;
}
-static void __cpuinit arch_timer_setup(void *data)
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
- struct clock_event_device *clk = data;
+ /* setup clock event only once for CPU 0 */
+ if (!smp_processor_id() && clk->irq == arch_timer_ppi)
+ return 0;
/* Be safe... */
- arch_timer_stop();
+ arch_timer_disable();
clk->features = CLOCK_EVT_FEAT_ONESHOT;
clk->name = "arch_sys_timer";
@@ -145,14 +149,17 @@
clk->set_mode = arch_timer_set_mode;
clk->set_next_event = arch_timer_set_next_event;
clk->irq = arch_timer_ppi;
- clk->cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(clk, arch_timer_rate,
0xf, 0x7fffffff);
- enable_percpu_irq(arch_timer_ppi, 0);
- if (arch_timer_ppi2 > 0)
+ *__this_cpu_ptr(arch_timer_evt) = clk;
+
+ enable_percpu_irq(clk->irq, 0);
+ if (arch_timer_ppi2)
enable_percpu_irq(arch_timer_ppi2, 0);
+
+ return 0;
}
/* Is the optional system timer available? */
@@ -181,7 +188,7 @@
arch_timer_rate = freq;
pr_info("Architected local timer running at %lu.%02luMHz.\n",
- arch_timer_rate / 1000000, (arch_timer_rate % 100000) / 100);
+ freq / 1000000, (freq / 10000) % 100);
}
return 0;
@@ -193,7 +200,7 @@
asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
- return ((u64) cvalh << 32) | cvall;
+ return ((cycle_t) cvalh << 32) | cvall;
}
static inline cycle_t arch_counter_get_cntvct(void)
@@ -202,7 +209,7 @@
asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
- return ((u64) cvalh << 32) | cvall;
+ return ((cycle_t) cvalh << 32) | cvall;
}
static cycle_t arch_counter_read(struct clocksource *cs)
@@ -250,61 +257,36 @@
update_sched_clock(&cd, arch_counter_get_cntvct32(), (u32)~0);
}
-static void __cpuinit arch_timer_teardown(void *data)
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
- struct clock_event_device *clk = data;
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
- disable_percpu_irq(arch_timer_ppi);
- if (arch_timer_ppi2 > 0)
+ disable_percpu_irq(clk->irq);
+ if (arch_timer_ppi2)
disable_percpu_irq(arch_timer_ppi2);
arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *data)
-{
- int cpu = (int)data;
- struct clock_event_device *clk = per_cpu_ptr(arch_timer_evt, cpu);
-
- switch(action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- smp_call_function_single(cpu, arch_timer_setup, clk, 1);
- break;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- smp_call_function_single(cpu, arch_timer_teardown, clk, 1);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
+static struct local_timer_ops arch_timer_ops __cpuinitdata = {
+ .setup = arch_timer_setup,
+ .stop = arch_timer_stop,
};
-int __init arch_timer_register(struct resource *res, int res_nr)
+int __init arch_timer_register(struct arch_timer *at)
{
int err;
- if (!res_nr || res[0].start < 0 || !(res[0].flags & IORESOURCE_IRQ))
+ if (at->res[0].start <= 0 || !(at->res[0].flags & IORESOURCE_IRQ))
return -EINVAL;
err = arch_timer_available();
if (err)
return err;
- arch_timer_evt = alloc_percpu(struct clock_event_device);
+ arch_timer_evt = alloc_percpu(struct clock_event_device *);
if (!arch_timer_evt)
return -ENOMEM;
- arch_timer_ppi = res[0].start;
- if (res_nr > 1 && (res[1].flags & IORESOURCE_IRQ))
- arch_timer_ppi2 = res[1].start;
-
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
init_sched_clock(&cd, arch_timer_update_sched_clock, 32,
@@ -314,26 +296,41 @@
set_delay_fn(read_current_timer_delay_loop);
#endif
+ arch_timer_ppi = at->res[0].start;
err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
- "arch_sys_timer", arch_timer_evt);
+ "arch_timer", arch_timer_evt);
if (err) {
- pr_err("%s: can't register interrupt %d (%d)\n",
- "arch_sys_timer", arch_timer_ppi, err);
- return err;
+ pr_err("arch_timer: can't register interrupt %d (%d)\n",
+ arch_timer_ppi, err);
+ goto out_free;
}
- if (arch_timer_ppi2 > 0) {
+ if (at->res[1].start > 0 && (at->res[1].flags & IORESOURCE_IRQ)) {
+ arch_timer_ppi2 = at->res[1].start;
err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
- "arch_sys_timer", arch_timer_evt);
- if (err)
- pr_warn("%s: can't register interrupt %d (%d)\n",
- "arch_sys_timer", arch_timer_ppi2, err);
+ "arch_timer", arch_timer_evt);
+ if (err) {
+ pr_err("arch_timer: can't register interrupt %d (%d)\n",
+ arch_timer_ppi2, err);
+ arch_timer_ppi2 = 0;
+ goto out_free_irq;
+ }
}
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(per_cpu_ptr(arch_timer_evt, smp_processor_id()));
-
- register_cpu_notifier(&arch_timer_cpu_nb);
+ err = local_timer_register(&arch_timer_ops);
+ if (err)
+ goto out_free_irq;
+ percpu_timer_setup();
return 0;
+
+out_free_irq:
+ free_percpu_irq(arch_timer_ppi, arch_timer_evt);
+ if (arch_timer_ppi2)
+ free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
+
+out_free:
+ free_percpu(arch_timer_evt);
+
+ return err;
}
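The arch/arm/include/asm/arch_timer.h conflict is not shown in this excerpt;
the registration path above assumes an interface roughly along these lines,
sketched from the usage in the hunk rather than from the actual header change:

/* Sketch only: field names inferred from the arch_timer_register() usage
 * above; the real arch_timer.h hunk is not part of this excerpt.
 */
struct arch_timer {
	struct resource	res[2];	/* res[0]: primary PPI, res[1]: optional second PPI */
};

int arch_timer_register(struct arch_timer *at);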