/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/smp.h>

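/*
 * Count of interrupt events that could not be attributed to any handler;
 * reported as the "ERR:" line in /proc/interrupts and folded into the
 * /proc/stat totals via arch_irq_stat() below.
 */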
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But don't ack when the APIC is disabled. -AK
	 */
	if (!disable_apic)
		ack_APIC_irq();
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Probabilistic stack overflow check:
 *
 * Only the process stack needs checking, because everything else
 * runs on the big interrupt stacks. Checking reliably on every kernel
 * entry would be too expensive, so we just sample from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
	u64 curbase = (u64)task_stack_page(current);
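	/* Pre-aged so the very first overflow after boot can warn immediately. */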
	static unsigned long warned = -60*HZ;

	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
	    regs->sp < curbase + sizeof(struct thread_info) + 128 &&
	    time_after(jiffies, warned + 60*HZ)) {
		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
			current->comm, curbase, regs->sp);
		show_stack(NULL,NULL);
		warned = jiffies;
	}
}
#endif

/*
 * Generic, controller-independent functions:
 */

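/*
 * Back end for /proc/interrupts: one row per IRQ with its per-CPU
 * counts, followed by the architecture-specific summary counters
 * (NMI, LOC, RES, ...) and the shared error total.
 */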
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d",j);
		seq_putc(p, '\n');
	}

	if (i < nr_irqs) {
		unsigned any_count = 0;
		struct irq_desc *desc = irq_to_desc(i);

		spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
		any_count = kstat_irqs(i);
#else
		for_each_online_cpu(j)
			any_count |= kstat_irqs_cpu(i, j);
#endif
		action = desc->action;
		if (!action && !any_count)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %8s", desc->chip->name);
		seq_printf(p, "-%-8s", desc->name);

		if (action) {
			seq_printf(p, " %s", action->name);
			while ((action = action->next) != NULL)
				seq_printf(p, ", %s", action->name);
		}
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == nr_irqs) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
		seq_printf(p, " Non-maskable interrupts\n");
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
		seq_printf(p, " Local timer interrupts\n");
#ifdef CONFIG_SMP
		seq_printf(p, "RES: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
		seq_printf(p, " Rescheduling interrupts\n");
		seq_printf(p, "CAL: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
		seq_printf(p, " Function call interrupts\n");
		seq_printf(p, "TLB: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
		seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
		seq_printf(p, "TRM: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
		seq_printf(p, " Thermal event interrupts\n");
		seq_printf(p, "THR: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
		seq_printf(p, " Threshold APIC interrupts\n");
#endif
		seq_printf(p, "SPU: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
		seq_printf(p, " Spurious interrupts\n");
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = cpu_pda(cpu)->__nmi_count;

	sum += cpu_pda(cpu)->apic_timer_irqs;
#ifdef CONFIG_SMP
	sum += cpu_pda(cpu)->irq_resched_count;
	sum += cpu_pda(cpu)->irq_call_count;
	sum += cpu_pda(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += cpu_pda(cpu)->irq_thermal_count;
	sum += cpu_pda(cpu)->irq_threshold_count;
#endif
	sum += cpu_pda(cpu)->irq_spurious_count;
	return sum;
}

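/*
 * Interrupts that are not accounted per CPU: currently just the
 * shared error counter, added to the system-wide "intr" total.
 */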
u64 arch_irq_stat(void)
{
	return atomic_read(&irq_err_count);
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();
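	/* Translate the hardware vector into a Linux irq number via the per-CPU table. */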
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	desc = __irq_to_desc(irq);
	if (likely(desc))
		generic_handle_irq_desc(irq, desc);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

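/*
 * Called in the CPU-hotplug path with the mask of CPUs that remain
 * online; re-targets every active IRQ whose affinity would otherwise
 * still point at the outgoing CPU.
 */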
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

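		/* IRQ 2 is the cascade to the slave i8259 and never has a real handler. */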
		if (irq == 2)
			continue;

		/* Interrupts are disabled at this point. */
		spin_lock(&desc->lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(desc->affinity, map)) {
			spin_unlock(&desc->lock);
			continue;
		}

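		/*
		 * If none of the currently allowed CPUs remain online, fall
		 * back to the full online map and note that the original
		 * affinity had to be broken.
		 */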
		cpus_and(mask, desc->affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

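/*
 * call_softirq (arch entry assembly) switches to the per-CPU interrupt
 * stack before running the pending softirqs, so a deep process stack
 * is not an issue while they run.
 */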
extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}