/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/setup.h>

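/* the EPSW value that local_irq_enable() reinstates to turn interrupts back
 * on; do_IRQ() below temporarily copies the running interrupt's priority
 * level into the IM field of this so that local_irq_enable() doesn't muck up
 * the priority setting in EPSW whilst a handler is running */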
unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

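/* count of erroneous/spurious interrupts, reported as the "ERR:" line of
 * /proc/interrupts by show_interrupts() below */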
atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
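/* 'ACK' an edge-triggered IRQ by writing the detect flag back to its
 * interrupt control register, clearing the detection latch; the byte-sized
 * write appears to be used so that only the detect/request half of the
 * register is touched, and the read back presumably ensures the write has
 * reached the interrupt controller before the handler proceeds */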
static void mn10300_cpupic_ack(unsigned int irq)
{
        u16 tmp;
        *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
}

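/* the mask, mask_ack and unmask operations below all preserve the priority
 * level field (GxICR_LEVEL) whilst rewriting the enable and detect bits, and
 * each finishes with a dummy read back of the ICR, presumably to flush the
 * write before returning */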
static void mn10300_cpupic_mask(unsigned int irq)
{
        u16 tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL);
        tmp = GxICR(irq);
}

static void mn10300_cpupic_mask_ack(unsigned int irq)
{
        u16 tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
        tmp = GxICR(irq);
}

static void mn10300_cpupic_unmask(unsigned int irq)
{
        u16 tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
        tmp = GxICR(irq);
}

static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
        /* the MN10300 PIC latches its interrupt request bit, even after the
         * device has ceased to assert its interrupt line and the interrupt
         * channel has been disabled in the PIC, so for level-triggered
         * interrupts we need to clear the request bit when we re-enable */
        u16 tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
        tmp = GxICR(irq);
}

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
        .name           = "cpu_l",
        .disable        = mn10300_cpupic_mask,
        .enable         = mn10300_cpupic_unmask_clear,
        .ack            = NULL,
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask,
        .unmask         = mn10300_cpupic_unmask_clear,
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
        .name           = "cpu_e",
        .disable        = mn10300_cpupic_mask,
        .enable         = mn10300_cpupic_unmask,
        .ack            = mn10300_cpupic_ack,
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask_ack,
        .unmask         = mn10300_cpupic_unmask,
};

/*
 * What should we do if we get a hardware IRQ event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(int irq)
{
        printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
        u16 tmp;

        if (in_interrupt())
                BUG();

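        /* preserve the enable bit whilst installing the new priority level */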
        tmp = GxICR(irq);
        GxICR(irq) = (tmp & GxICR_ENABLE) | level;
        tmp = GxICR(irq);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void set_intr_postackable(int irq)
{
        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
                                 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
        int irq;

        for (irq = 0; irq < NR_IRQS; irq++)
                if (irq_desc[irq].chip == &no_irq_chip)
                        /* due to the PIC latching interrupt requests, even
                         * when the IRQ is disabled, IRQ_PENDING is superfluous
                         * and we can use handle_level_irq() for edge-triggered
                         * interrupts */
                        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
                                                 handle_level_irq);
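        /* hand over to the unit (board) support code to set up any
         * unit-specific interrupt sources */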
        unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
        unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
        int irq;

        sp = current_stack_pointer();
        if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
                BUG();

        /* make sure local_irq_enable() doesn't muck up the interrupt priority
         * setting in EPSW */
        old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
        local_save_flags(epsw);
        __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
        irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

        __IRQ_STAT(smp_processor_id(), __irq_count)++;

        irq_enter();

        for (;;) {
                /* ask the interrupt controller for the next IRQ to process
                 * - the result we get depends on EPSW.IM
                 */
                irq = IAGR & IAGR_GN;
                if (!irq)
                        break;

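                /* switch EPSW to the kernel's 'interrupts disabled' priority
                 * level (IE stays set, IM goes to MN10300_CLI_LEVEL) before
                 * running the handler */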
                local_irq_restore(irq_disabled_epsw);

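                /* the IAGR gives the accepted group number multiplied by
                 * four (presumably so it can be used directly as a table
                 * offset), so shift it down to get the Linux IRQ number */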
                generic_handle_irq(irq >> 2);

                /* restore IRQ controls for IAGR access */
                local_irq_restore(epsw);
        }

        __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;

        irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j, cpu;
        struct irqaction *action;
        unsigned long flags;

        switch (i) {
                /* display column title bar naming CPUs */
        case 0:
                seq_printf(p, "           ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
                break;

                /* display information rows, one per IRQ that has a handler
                 * installed */
        case 1 ... NR_IRQS - 1:
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

                action = irq_desc[i].action;
                if (action) {
                        seq_printf(p, "%3d: ", i);
                        for_each_present_cpu(cpu)
                                seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
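                        /* note which irq_chip is driving this IRQ and the
                         * priority level currently programmed into its ICR */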
                        seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
                                   (GxICR(i) & GxICR_LEVEL) >>
                                   GxICR_LEVEL_SHIFT);
                        seq_printf(p, "  %s", action->name);

                        for (action = action->next;
                             action;
                             action = action->next)
                                seq_printf(p, ", %s", action->name);

                        seq_putc(p, '\n');
                }

                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
                break;

                /* polish off with NMI and error counters */
        case NR_IRQS:
                seq_printf(p, "NMI: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');

                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                break;
        }

        return 0;
}