/*
 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
 *
 * Copyright (C) 2005 - 2007 Paul Mundt
 *
 * TMU handling code hacked out of arch/sh/kernel/time.c
 *
 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2002, 2003, 2004 Paul Mundt
 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/seqlock.h>
#include <linux/clockchips.h>
#include <asm/timer.h>
#include <asm/rtc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/clock.h>

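/*
 * TMU_TOCR_INIT is written to the timer output control register on CPU
 * subtypes that have one (see tmu_timer_init()). TMU_TCR_INIT sets UNIE
 * (underflow interrupt enable, bit 5) with TPSC = 0, i.e. the module
 * clock divided by 4.
 */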
#define TMU_TOCR_INIT	0x00
#define TMU_TCR_INIT	0x0020

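/*
 * Channel usage: TMU0 provides the clock event device (its underflow
 * interrupt drives the tick), while TMU1 free-runs as the counter
 * sampled by tmu_timer_read(). The low two bits of TSTR start and stop
 * both channels together.
 */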
static int tmu_timer_start(void)
{
	ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR);
	return 0;
}

static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
{
	ctrl_outl(interval, TMU0_TCNT);

	/*
	 * TCNT reloads from TCOR on underflow, clear it if we don't
	 * intend to auto-reload
	 */
	if (reload)
		ctrl_outl(interval, TMU0_TCOR);
	else
		ctrl_outl(0, TMU0_TCOR);

	tmu_timer_start();
}

static int tmu_timer_stop(void)
{
	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR);
	return 0;
}

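/*
 * The TMU counts down, so the complement of TMU1's count yields a value
 * that increases with time, as expected of a clocksource read.
 */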
static cycle_t tmu_timer_read(void)
{
	return ~ctrl_inl(TMU1_TCNT);
}

static int tmu_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	tmu0_timer_set_interval(cycles, 1);
	return 0;
}

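/*
 * Periodic mode copies the current count into TCOR so TMU0 auto-reloads
 * on every underflow; oneshot mode clears TCOR so each event has to be
 * reprogrammed via tmu_set_next_event().
 */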
static void tmu_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		ctrl_outl(0, TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device tmu0_clockevent = {
	.name		= "tmu0",
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= tmu_set_mode,
	.set_next_event	= tmu_set_next_event,
};

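/*
 * TMU0 underflow handler. The UNF status bit (0x100 in TCR) is cleared
 * by hand before invoking the clockevent handler, otherwise the
 * interrupt would remain pending.
 */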
static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
{
	struct clock_event_device *evt = &tmu0_clockevent;
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction tmu0_irq = {
	.name		= "periodic timer",
	.handler	= tmu_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.mask		= CPU_MASK_NONE,
};

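/*
 * The low three TCR bits (TPSC) select the prescaler: values 0-3 map to
 * the module clock divided by 4, 16, 64 and 256 respectively, hence the
 * parent->rate / (4 << (divisor << 1)) calculation below.
 */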
static void tmu0_clk_init(struct clk *clk)
{
	u8 divisor = TMU_TCR_INIT & 0x7;
	ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
	clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static void tmu0_clk_recalc(struct clk *clk)
{
	u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
	clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static struct clk_ops tmu0_clk_ops = {
	.init		= tmu0_clk_init,
	.recalc		= tmu0_clk_recalc,
};

static struct clk tmu0_clk = {
	.name		= "tmu0_clk",
	.ops		= &tmu0_clk_ops,
};

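/*
 * TMU1 gets only the divisor bits written to its TCR (no UNIE): it runs
 * as a free-running counter and never raises an interrupt.
 */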
static void tmu1_clk_init(struct clk *clk)
{
	u8 divisor = TMU_TCR_INIT & 0x7;
	ctrl_outw(divisor, TMU1_TCR);
	clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static void tmu1_clk_recalc(struct clk *clk)
{
	u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
	clk->rate = clk->parent->rate / (4 << (divisor << 1));
}

static struct clk_ops tmu1_clk_ops = {
	.init		= tmu1_clk_init,
	.recalc		= tmu1_clk_recalc,
};

static struct clk tmu1_clk = {
	.name		= "tmu1_clk",
	.ops		= &tmu1_clk_ops,
};

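/*
 * Bring-up order: hook the timer IRQ, hang both channel clocks off
 * module_clk, halt the channels, program TMU1 to free-run from all-ones
 * and TMU0 to underflow at HZ, then register the clockevent. The TOCR
 * write is skipped on CPU subtypes that don't implement the register.
 */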
static int tmu_timer_init(void)
{
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	tmu_timer_stop();

#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
    !defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	frequency = clk_get_rate(&tmu0_clk);
	interval = (frequency + HZ / 2) / HZ;

	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
	ctrl_outl(~0, TMU1_TCNT);
	ctrl_outl(~0, TMU1_TCOR);

	tmu0_timer_set_interval(interval, 1);

	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of_cpu(0);

	clockevents_register_device(&tmu0_clockevent);

	return 0;
}

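/*
 * Left non-static so the arch timer selection code can pick this driver
 * up as "tmu".
 */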
struct sys_timer_ops tmu_timer_ops = {
	.init		= tmu_timer_init,
	.start		= tmu_timer_start,
	.stop		= tmu_timer_stop,
	.read		= tmu_timer_read,
};

struct sys_timer tmu_timer = {
	.name	= "tmu",
	.ops	= &tmu_timer_ops,
};