/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>

unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
static void mn10300_cpupic_ack(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}
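
/*
 * A note on the helper above (and the ack routine before it): every ICR
 * update in this file is done with interrupts disabled and is followed by a
 * dummy read-back of the same register.  The read-back is presumably what
 * forces the write out to the interrupt controller before interrupts are
 * re-enabled; treat that rationale as an inference from the code rather
 * than something stated in the original source.
 */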

static void mn10300_cpupic_mask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
}

static void mn10300_cpupic_mask_ack(unsigned int irq)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			any_online_cpu(*irq_desc[irq].affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
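
/*
 * Illustrative sketch, kept out of the build: what the latch behaviour
 * described in mn10300_cpupic_unmask_clear() means for a level-triggered
 * driver.  The example_* names are invented for the example and are not
 * real kernel symbols.
 */
#if 0
static irqreturn_t example_level_interrupt(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* quiesce the device itself; the PIC's request latch for this
	 * channel is only cleared when the flow handler re-enables it
	 * through mn10300_cpupic_unmask_clear() */
	example_dev_clear_irq(dev);
	return IRQ_HANDLED;
}
#endif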

#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* check irq no */
	switch (irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case GDB_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name		= "cpu_l",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask_clear,
	.ack		= NULL,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask,
	.unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
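
/*
 * For reference, with the generic handle_level_irq() flow the ops above get
 * used roughly as follows (a sketch of the generic flow, not verbatim
 * kernel code):
 *
 *	handle_level_irq(irq, desc)
 *	    chip->mask_ack(irq)   -> mn10300_cpupic_mask()  (mask only)
 *	    handle_IRQ_event()    -> run the device handlers
 *	    chip->unmask(irq)     -> mn10300_cpupic_unmask_clear()
 *	                             (re-enable and clear the latch)
 */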

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name		= "cpu_e",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask,
	.ack		= mn10300_cpupic_ack,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask_ack,
	.unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
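
/*
 * Note (an inference from the code, not stated in the original): the 'ACK'
 * here is mn10300_cpupic_ack(), which writes GxICR_DETECT into the low byte
 * of the channel's ICR and reads it back, i.e. the latch-clearing operation
 * the comment above refers to, without touching the enable and level bits
 * held in the high byte.
 */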

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

void mn10300_intc_set_level(unsigned int irq, unsigned int level)
{
	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
}

void mn10300_intc_clear(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
}

void mn10300_intc_set(unsigned int irq)
{
	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
}

void mn10300_intc_enable(unsigned int irq)
{
	mn10300_cpupic_unmask(irq);
}

void mn10300_intc_disable(unsigned int irq)
{
	mn10300_cpupic_mask(irq);
}
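
/*
 * Illustrative sketch, kept out of the build: how board or unit setup code
 * might drive the helpers above.  EXAMPLE_IRQ and the level value are
 * invented for the example.
 */
#if 0
static void __init example_unit_irq_setup(void)
{
	/* run EXAMPLE_IRQ at priority level 4, discard any stale request,
	 * then enable the channel */
	mn10300_intc_set_level(EXAMPLE_IRQ, 4);
	mn10300_intc_clear(EXAMPLE_IRQ);
	mn10300_intc_enable(EXAMPLE_IRQ);
}
#endif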

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void mn10300_set_lateack_irq_type(int irq)
{
	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}
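
/*
 * Illustrative sketch, kept out of the build: a board's IRQ setup might
 * mark an external, level-sensitive interrupt for late ACK like this before
 * drivers request it.  EXAMPLE_XIRQ is a placeholder, not a real board
 * definition.
 */
#if 0
void __init example_board_init_IRQ(void)
{
	mn10300_set_lateack_irq_type(EXAMPLE_XIRQ);
}
#endif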

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_desc[irq].chip == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}
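
/*
 * A note on the loop above (an inference from the register usage, not
 * stated in this file): the IAGR arbitration register supplies the group
 * number of the highest-priority pending interrupt above the current
 * EPSW.IM level, pre-scaled as a table offset, which is why the value is
 * shifted right by two before being handed to generic_handle_irq().  A zero
 * result means there is nothing left to service at this priority.
 */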

/*
 * Display interrupt management information through /proc/interrupts
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j, cpu;
	struct irqaction *action;
	unsigned long flags;

	switch (i) {
		/* display column title bar naming CPUs */
	case 0:
		seq_printf(p, " ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
		break;

		/* display information rows, one per active CPU */
	case 1 ... NR_IRQS - 1:
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

		action = irq_desc[i].action;
		if (action) {
			seq_printf(p, "%3d: ", i);
			for_each_present_cpu(cpu)
				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));

			if (i < NR_CPU_IRQS)
				seq_printf(p, " %14s.%u",
					   irq_desc[i].chip->name,
					   (GxICR(i) & GxICR_LEVEL) >>
					   GxICR_LEVEL_SHIFT);
			else
				seq_printf(p, " %14s",
					   irq_desc[i].chip->name);

			seq_printf(p, " %s", action->name);

			for (action = action->next;
			     action;
			     action = action->next)
				seq_printf(p, ", %s", action->name);

			seq_putc(p, '\n');
		}

		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		break;

		/* polish off with NMI and error counters */
	case NR_IRQS:
#ifdef CONFIG_MN10300_WD_TIMER
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#endif

		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;
	}

	return 0;
}
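
/*
 * For orientation, the output generated above looks something like the
 * following on a single-CPU system (illustrative only; the counts, names
 * and exact column spacing are invented):
 *
 *	           CPU0
 *	  1:       1523   cpu_e.1  serial
 *	 12:      60042   cpu_l.6  timer
 *	ERR:          0
 */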

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	irq_desc_t *desc;
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;

		if (desc->status == IRQ_PER_CPU)
			continue;

		if (cpu_isset(self, irq_desc[irq].affinity) &&
		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
			int cpu_id;
			cpu_id = first_cpu(cpu_online_map);
			cpu_set(cpu_id, irq_desc[irq].affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		flags = arch_local_cli_save();
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = any_online_cpu(irq_desc[irq].affinity);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */