/*
 * SuperH Timer Support - TMU
 *
 *  Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>

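/*
 * Each TMU channel is probed as a platform device carrying a
 * struct sh_timer_config as platform data.  A minimal sketch of board
 * code (hypothetical values; real offsets and ratings are SoC specific):
 *
 *	static struct sh_timer_config tmu0_platform_data = {
 *		.channel_offset     = 0x04,  // distance from channel base down to TSTR
 *		.timer_bit          = 0,     // this channel's start bit in TSTR
 *		.clockevent_rating  = 200,   // nonzero: register as clock event device
 *		.clocksource_rating = 0,     // nonzero: register as clocksource
 *	};
 *
 * Only one role is taken per channel; see sh_tmu_register() below.
 */
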
struct sh_tmu_priv {
        void __iomem *mapbase;
        struct clk *clk;
        struct irqaction irqaction;
        struct platform_device *pdev;
        unsigned long rate;
        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
};

static DEFINE_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

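/*
 * Register layout as assumed by the accessors below: the three channel
 * registers sit at mapbase + reg_nr * 4 (TCOR, TCNT, then TCR), while
 * the start/stop register TSTR is shared by all channels and sits below
 * the channel base, at mapbase - channel_offset.  TSTR is accessed as
 * 8 bits, TCR as 16 bits, TCOR/TCNT as 32 bits.
 */
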
static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == TSTR)
                return ioread8(base - cfg->channel_offset);

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                return ioread16(base + offs);
        else
                return ioread32(base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
                                unsigned long value)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == TSTR) {
                iowrite8(value, base - cfg->channel_offset);
                return;
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                iowrite16(value, base + offs);
        else
                iowrite32(value, base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        spin_lock_irqsave(&sh_tmu_lock, flags);
        value = sh_tmu_read(p, TSTR);

        if (start)
                value |= 1 << cfg->timer_bit;
        else
                value &= ~(1 << cfg->timer_bit);

        sh_tmu_write(p, TSTR, value);
        spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int sh_tmu_enable(struct sh_tmu_priv *p)
{
        int ret;

        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
                return ret;
        }

        /* make sure channel is disabled */
        sh_tmu_start_stop_ch(p, 0);

        /* maximum timeout */
        sh_tmu_write(p, TCOR, 0xffffffff);
        sh_tmu_write(p, TCNT, 0xffffffff);

        /* configure channel to parent clock / 4, irq off */
        p->rate = clk_get_rate(p->clk) / 4;
        sh_tmu_write(p, TCR, 0x0000);

        /* enable channel */
        sh_tmu_start_stop_ch(p, 1);

        return 0;
}

static void sh_tmu_disable(struct sh_tmu_priv *p)
{
        /* disable channel */
        sh_tmu_start_stop_ch(p, 0);

        /* disable interrupts in TMU block */
        sh_tmu_write(p, TCR, 0x0000);

        /* stop clock */
        clk_disable(p->clk);
}

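/*
 * Programming model for the event code below: TCNT is a 32-bit
 * down-counter that is reloaded from TCOR on underflow.  A periodic
 * tick therefore writes the period to both TCOR and TCNT, while a
 * one-shot event parks TCOR at the maximum value.  The TCR value 0x0020
 * keeps the input at parent clock / 4 and sets the underflow interrupt
 * enable bit (UNIE); reading TCR before rewriting it acknowledges a
 * pending underflow.
 */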
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
                            int periodic)
{
        /* stop timer */
        sh_tmu_start_stop_ch(p, 0);

        /* acknowledge interrupt */
        sh_tmu_read(p, TCR);

        /* enable interrupt */
        sh_tmu_write(p, TCR, 0x0020);

        /* reload delta value in case of periodic timer */
        if (periodic)
                sh_tmu_write(p, TCOR, delta);
        else
                sh_tmu_write(p, TCOR, 0xffffffff);

        sh_tmu_write(p, TCNT, delta);

        /* start timer */
        sh_tmu_start_stop_ch(p, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
        struct sh_tmu_priv *p = dev_id;

        /* disable or acknowledge interrupt */
        if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
                sh_tmu_write(p, TCR, 0x0000);
        else
                sh_tmu_write(p, TCR, 0x0020);

        /* notify clockevent layer */
        p->ced.event_handler(&p->ced);
        return IRQ_HANDLED;
}

static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
        return container_of(cs, struct sh_tmu_priv, cs);
}

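/*
 * Clocksource side: TCNT counts down, so the read hook inverts it
 * (x ^ 0xffffffff == 0xffffffff - x for a 32-bit x) to present the
 * monotonically increasing counter the timekeeping core expects.
 */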
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
        struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

        return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
        struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
        int ret;

        ret = sh_tmu_enable(p);
        if (!ret)
                __clocksource_updatefreq_hz(cs, p->rate);
        return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
        sh_tmu_disable(cs_to_sh_tmu(cs));
}

static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
                                       char *name, unsigned long rating)
{
        struct clocksource *cs = &p->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&p->pdev->dev, "used as clock source\n");

        /* Register with dummy 1 Hz value, gets updated in ->enable() */
        clocksource_register_hz(cs, 1);
        return 0;
}

static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_tmu_priv, ced);
}

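/*
 * Clockevent side: the counter runs at parent clock / 4 (p->rate), which
 * sh_tmu_clock_event_start() converts to a mult/shift pair for the
 * clockevent core.  The periodic reload value is the rate divided by HZ,
 * rounded to the nearest count: (p->rate + HZ/2) / HZ.
 */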
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
        struct clock_event_device *ced = &p->ced;

        sh_tmu_enable(p);

        /* TODO: calculate good shift from rate and counter bit width */

        ced->shift = 32;
        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
        ced->min_delta_ns = 5000;

        if (periodic) {
                p->periodic = (p->rate + HZ/2) / HZ;
                sh_tmu_set_next(p, p->periodic, 1);
        }
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
        int disabled = 0;

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_tmu_disable(p);
                disabled = 1;
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_tmu_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_tmu_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
                if (!disabled)
                        sh_tmu_disable(p);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        default:
                break;
        }
}

static int sh_tmu_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_tmu_priv *p = ced_to_sh_tmu(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

        /* program new delta value */
        sh_tmu_set_next(p, delta, 0);
        return 0;
}

static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
                                       char *name, unsigned long rating)
{
        struct clock_event_device *ced = &p->ced;
        int ret;

        memset(ced, 0, sizeof(*ced));

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;

        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);

        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to request irq %d\n",
                        p->irqaction.irq);
                return;
        }
}

static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
                    unsigned long clockevent_rating,
                    unsigned long clocksource_rating)
{
        if (clockevent_rating)
                sh_tmu_register_clockevent(p, name, clockevent_rating);
        else if (clocksource_rating)
                sh_tmu_register_clocksource(p, name, clocksource_rating);

        return 0;
}

static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res;
        int irq, ret;
        ret = -ENXIO;

        memset(p, 0, sizeof(*p));
        p->pdev = pdev;

        if (!cfg) {
                dev_err(&p->pdev->dev, "missing platform data\n");
                goto err0;
        }

        platform_set_drvdata(pdev, p);

        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
                goto err0;
        }

        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
                dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }

        /* setup data for setup_irq() (too early for request_irq()) */
        p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_tmu_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.irq = irq;
        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
                             IRQF_IRQPOLL | IRQF_NOBALANCING;

        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "tmu_fck");
        if (IS_ERR(p->clk)) {
                dev_err(&p->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
                goto err1;
        }

        return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
                               cfg->clockevent_rating,
                               cfg->clocksource_rating);
 err1:
        iounmap(p->mapbase);
 err0:
        return ret;
}

static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
        struct sh_tmu_priv *p = platform_get_drvdata(pdev);
        int ret;

        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                return 0;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_tmu_setup(p, pdev);
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
        }
        return ret;
}

static int __devexit sh_tmu_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
        .probe          = sh_tmu_probe,
        .remove         = __devexit_p(sh_tmu_remove),
        .driver         = {
                .name   = "sh_tmu",
        }
};

static int __init sh_tmu_init(void)
{
        return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
        platform_driver_unregister(&sh_tmu_device_driver);
}

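/*
 * Registering as an "earlytimer" lets a TMU channel be probed and used
 * before the regular device model is available; when the normal probe
 * runs later, sh_tmu_probe() finds the driver data already set and
 * keeps the early configuration ("kept as earlytimer") instead of
 * setting the channel up twice.
 */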
early_platform_init("earlytimer", &sh_tmu_device_driver);
module_init(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");