/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled. -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

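        /*
         * Kernel stacks are THREAD_SIZE-aligned, so masking %esp with
         * THREAD_SIZE - 1 yields its offset within the current stack,
         * i.e. how much room remains above the thread_info sitting at
         * the stack's bottom before the downward-growing stack hits it.
         */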
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 *
 * Like a regular thread stack, each IRQ stack keeps a struct
 * thread_info at its base, so the usual thread_info accessors
 * keep working while we run on it.
 */
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

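/*
 * Call func on the given stack, then return to the original one.
 * The xchgl installs the new stack pointer and parks the old one
 * in %ebx; after func returns, the movl switches back.
 */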
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call  *%%edi      \n"
                     "movl  %%ebx,%%esp \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D" (func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * This is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which already is the IRQ stack).
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

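        /*
         * Call desc->handle_irq(irq, desc) on the hardirq stack. With
         * the kernel's regparm(3) calling convention on 32-bit x86,
         * %eax and %edx carry the two arguments; %ebx swaps the stack
         * pointers exactly as in call_on_stack() above.
         */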
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call  *%%edi      \n"
                     "movl  %%ebx,%%esp \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

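        /*
         * Nothing to do when we are already in hard or soft interrupt
         * context: pending softirqs are run when the outermost
         * interrupt exits.
         */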
        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        /*
         * The entry code pushes the complemented vector number into
         * orig_ax, so its high bit is set there; the ret_from_ code
         * uses that bit to tell interrupt frames from syscall frames.
         */
        int overflow, irq = ~regs->orig_ax;
        struct irq_desc *desc = irq_desc + irq;

        if (unlikely((unsigned)irq >= NR_IRQS)) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                       __func__, irq);
                BUG();
        }

        old_regs = set_irq_regs(regs);
        irq_enter();

        overflow = check_stack_overflow();

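        /*
         * Try to run the handler on the per-cpu hardirq stack; fall
         * back to the current stack when no stack switch is possible
         * (4K stacks disabled, or we are already on the IRQ stack).
         */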
        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

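/*
 * seq_file hands us one row index per call in *v: row 0 prints the
 * CPU column headers (and then falls through to print IRQ 0 itself),
 * rows up to NR_IRQS - 1 print one line per IRQ, and row NR_IRQS
 * prints the summary counters at the bottom.
 */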
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                unsigned any_count = 0;

                spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
                any_count = kstat_irqs(i);
#else
                for_each_online_cpu(j)
                        any_count |= kstat_cpu(j).irqs[i];
#endif
                action = irq_desc[i].action;
                if (!action && !any_count)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);

                if (action) {
                        seq_printf(p, "  %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).apic_timer_irqs);
                seq_printf(p, "  Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
                seq_printf(p, "RES: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_resched_count);
                seq_printf(p, "  Rescheduling interrupts\n");
                seq_printf(p, "CAL: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_call_count);
                seq_printf(p, "  function call interrupts\n");
                seq_printf(p, "TLB: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_tlb_count);
                seq_printf(p, "  TLB shootdowns\n");
#endif
                seq_printf(p, "TRM: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_thermal_count);
                seq_printf(p, "  Thermal event interrupts\n");
                seq_printf(p, "SPU: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_spurious_count);
                seq_printf(p, "  Spurious interrupts\n");
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
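                /* IRQ 2 is the i8259 cascade and cannot be migrated */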
                if (irq == 2)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif