/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
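
/* EPSW settings to be merged back in when interrupts are re-enabled on each
 * CPU: by default, interrupts on with the priority mask at level 7.  do_IRQ()
 * below temporarily rewrites this so that local_irq_enable() cannot disturb
 * the interrupt priority in EPSW while an IRQ is being serviced.
 */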
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
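/* Affinity changes are applied lazily: mn10300_cpupic_setaffinity() merely
 * sets the IRQ's bit in irq_affinity_request, and the flow handlers below
 * (mask_ack/unmask) complete the move by programming the ICR of the newly
 * chosen CPU through CROSS_GxICR().  irq_affinity_online[] records which CPU
 * currently services each IRQ.
 */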
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
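	/* a byte-sized write clears the detection latch without having to
	 * read-modify-write the enable and level bits in the rest of the
	 * ICR */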
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}
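
/* atomically update an IRQ's ICR, keeping only the bits in @mask and then
 * setting the bits in @set; the ICR is read back so that the write has
 * reached the interrupt controller before interrupts are restored */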
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* the timer, IPI and serial-port interrupts are bound to particular
	 * CPUs and may not be retargeted */
	switch (d->irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(d->irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());
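
	/* keep only the enable bit and substitute the new level */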
	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);
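
	/* let the unit-specific code set up its own interrupt sources */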
	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);
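
		/* the group number read from the IAGR is a multiple of four,
		 * so scale it down to get the Linux IRQ number */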
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		/* if the affinity mask no longer covers any online CPU, fall
		 * back to the first online CPU */
		if (cpumask_test_cpu(self, data->affinity) &&
		    !cpumask_intersects(data->affinity, cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
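		/* if this CPU currently services the IRQ, hand it over: mask
		 * it locally, re-arm it on the new CPU and replay any request
		 * that was already latched */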
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = cpumask_any_and(data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */