/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/idle.h>

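/*
 * Per-CPU interrupt statistics. The internode alignment below keeps each
 * node's irq_stat counters on their own cacheline, avoiding false sharing
 * between NUMA nodes.
 */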
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifndef CONFIG_X86_LOCAL_APIC
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
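
/*
 * Overlaying a thread_info at the base of each THREAD_SIZE-aligned irq
 * stack means current_thread_info(), which masks %esp with
 * ~(THREAD_SIZE - 1), keeps working while we run on these stacks.
 */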

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
	int irq = ~regs->orig_eax;
	struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif
	exit_idle();

	if (unlikely((unsigned)irq >= NR_IRQS)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
					__FUNCTION__, irq);
		BUG();
	}

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long esp;

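		/*
		 * Masking %esp with THREAD_SIZE - 1 yields the offset into
		 * the current stack, i.e. how many bytes remain between
		 * %esp and the thread_info at the stack's base.
		 */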
		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (esp) : "0" (THREAD_SIZE - 1));
		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
				esp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

91#ifdef CONFIG_4KSTACKS
92
93 curctx = (union irq_ctx *) current_thread_info();
94 irqctx = hardirq_ctx[smp_processor_id()];
95
96 /*
97 * this is where we switch to the IRQ stack. However, if we are
98 * already using the IRQ stack (because we interrupted a hardirq
99 * handler) we can't do that and just have to keep using the
100 * current stack (which is the irq stack already after all)
101 */
	if (curctx != irqctx) {
		int arg1, arg2, ebx;

		/* build the stack frame on the IRQ stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

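		/*
		 * Switch stacks and call the handler: xchgl swaps %esp with
		 * the irq-stack pointer (the old %esp is kept in %ebx), the
		 * handler is called indirectly through %edi, and movl
		 * restores the original stack. irq and desc are preloaded
		 * into %eax/%edx, where handle_irq() expects its arguments
		 * under the kernel's regparm(3) calling convention.
		 */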
		asm volatile(
			"       xchgl  %%ebx,%%esp      \n"
			"       call   *%%edi           \n"
			"       movl   %%ebx,%%esp      \n"
			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
			:  "0" (irq),   "1" (desc),  "2" (isp),
			   "D" (desc->handle_irq)
			: "memory", "cc"
		);
	} else
#endif
		desc->handle_irq(irq, desc);

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_4KSTACKS

/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc's 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

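	/*
	 * Unlike the hardirq context above, the softirq context starts
	 * with a preempt_count of 0: __do_softirq() adds and removes
	 * SOFTIRQ_OFFSET itself.
	 */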
	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

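/*
 * With 4K stacks, this do_softirq() overrides the generic one so that
 * pending softirqs run on the dedicated per-cpu softirq stack instead
 * of whatever stack the caller happens to be on.
 */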
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));

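		/*
		 * Same stack-switch pattern as in do_IRQ(): %ebx carries
		 * the old %esp across the call. The caller-saved %eax,
		 * %ecx and %edx are listed as clobbers since
		 * __do_softirq() is free to trash them.
		 */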
		asm volatile(
			"       xchgl   %%ebx,%%esp     \n"
			"       call    __do_softirq    \n"
			"       movl    %%ebx,%%esp     \n"
			: "=b"(isp)
			: "0"(isp)
			: "memory", "cc", "edx", "ecx", "eax"
		);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

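/*
 * The seq_file iterator hands us one index per call: indices below
 * NR_IRQS print a row per irq (index 0 also emits the CPU header
 * line), and the extra index NR_IRQS prints the NMI/LOC/ERR/MIS
 * summary counters.
 */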
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

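/*
 * On CPU hot-unplug, retarget every irq whose affinity mask no longer
 * intersects the surviving cpus in 'map'. irq 2 is the 8259 cascade
 * and is never moved.
 */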
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;
		if (irq == 2)
			continue;

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient. Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif
333