/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

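/* Per-CPU pointer to the interrupted register state, for get_irq_regs(). */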
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	if (cpu_has_apic)
		ack_APIC_irq();
#endif
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

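	/*
	 * Kernel stacks are THREAD_SIZE-aligned, so masking %esp with
	 * (THREAD_SIZE - 1) yields the offset into the current stack.
	 */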
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

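/*
 * Switch %esp to the given stack, call func and switch back.  The
 * original stack pointer is kept in %ebx across the call; the clobber
 * list covers the remaining caller-saved registers the callee may use.
 */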
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;
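	/*
	 * previous_esp lets the stack dumper continue a backtrace on
	 * the stack of the interrupted task.
	 */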

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq), "1" (desc), "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

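	/*
	 * The hardirq context sits at the base of this CPU's slot in
	 * the page-aligned hardirq_stack array; its preempt_count
	 * starts at HARDIRQ_OFFSET so in_irq() stays true while
	 * running on it.
	 */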
	irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
	int overflow;
	unsigned vector = ~regs->orig_ax;
	struct irq_desc *desc;
	unsigned irq;

	old_regs = set_irq_regs(regs);
	irq_enter();
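	/*
	 * 'vector' was recovered above as ~regs->orig_ax: the entry
	 * stub pushes the complemented vector number (negative, so
	 * ret_from_intr can tell it apart from a syscall number), and
	 * this CPU's vector_irq[] table maps it to an IRQ number.
	 */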
	irq = __get_cpu_var(vector_irq)[vector];

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
		       __func__, irq, vector, smp_processor_id());
		BUG();
	}

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;
	unsigned int entries;
	struct irq_desc *desc;
	int tail = 0;

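	/*
	 * With CONFIG_HAVE_SPARSE_IRQ the seq iterator hands us the
	 * irq_desc directly; otherwise 'v' is a plain irq index and
	 * nr_irqs marks the trailing summary position.
	 */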
#ifdef CONFIG_HAVE_SPARSE_IRQ
	desc = (struct irq_desc *)v;
	entries = -1U;
	i = desc->irq;
	if (!desc->next)
		tail = 1;
#else
	entries = nr_irqs - 1;
	i = *(loff_t *) v;
	if (i == nr_irqs)
		tail = 1;
	else
		desc = irq_to_desc(i);
#endif

	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i <= entries) {
		unsigned any_count = 0;

		spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
		any_count = kstat_irqs(i);
#else
		for_each_online_cpu(j)
			any_count |= kstat_irqs_cpu(i, j);
#endif
		action = desc->action;
		if (!action && !any_count)
			goto skip;
		seq_printf(p, "%#x: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %8s", desc->chip->name);
		seq_printf(p, "-%-8s", desc->name);

		if (action) {
			seq_printf(p, " %s", action->name);
			while ((action = action->next) != NULL)
				seq_printf(p, ", %s", action->name);
		}

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (tail) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).apic_timer_irqs);
		seq_printf(p, " Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
		seq_printf(p, "RES: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).irq_resched_count);
		seq_printf(p, " Rescheduling interrupts\n");
		seq_printf(p, "CAL: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).irq_call_count);
		seq_printf(p, " Function call interrupts\n");
		seq_printf(p, "TLB: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).irq_tlb_count);
		seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
		seq_printf(p, "TRM: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).irq_thermal_count);
		seq_printf(p, " Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "SPU: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, j).irq_spurious_count);
		seq_printf(p, " Spurious interrupts\n");
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = nmi_count(cpu);

#ifdef CONFIG_X86_LOCAL_APIC
	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
#endif
#ifdef CONFIG_SMP
	sum += per_cpu(irq_stat, cpu).irq_resched_count;
	sum += per_cpu(irq_stat, cpu).irq_call_count;
	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

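/*
 * Called while a CPU goes offline, with 'map' holding the CPUs that
 * remain online: reroute every active irq whose affinity would
 * otherwise reference the dead CPU.
 */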
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;

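		/* irq 2 is the 8259 cascade; leave it alone */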
		if (irq == 2)
			continue;

		cpus_and(mask, desc->affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient. Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif
