/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/io_apic.h>
#include <asm/idle.h>

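/*
 * Per-cpu interrupt statistics, and a per-cpu pointer to the saved
 * registers of the interrupt currently being handled (kept up to
 * date by set_irq_regs() in do_IRQ() below).
 */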
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	u64 curbase = (u64)task_stack_page(current);

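	/*
	 * The stack grows down from curbase + THREAD_SIZE towards
	 * curbase, where struct thread_info lives.  Warn once if sp
	 * points into this task's stack but has come within a pt_regs
	 * frame plus 128 bytes of clobbering thread_info.
	 */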
	WARN_ONCE(regs->sp >= curbase &&
		  regs->sp <= curbase + THREAD_SIZE &&
		  regs->sp <  curbase + sizeof(struct thread_info) +
					sizeof(struct pt_regs) + 128,

		  "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
			current->comm, curbase, regs->sp);
#endif
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/*
	 * The interrupt entry stubs leave the negated vector number
	 * in orig_ax; the high bit stays set so the ret_from_ code
	 * can tell it apart from a syscall number.  Invert it back
	 * here to recover the vector.
	 */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();
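	/* per-cpu vector_irq table maps the vector back to a Linux irq */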
	irq = __get_cpu_var(vector_irq)[vector];

	stack_overflow_check(regs);

	desc = irq_to_desc(irq);
	if (likely(desc))
		generic_handle_irq_desc(irq, desc);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
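		/* irq 2 is the cascade irq and must not be retargeted */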
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&desc->lock);

		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			spin_unlock(&desc->lock);
			continue;
		}

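		/*
		 * If no online cpu is left in the affinity mask, fall
		 * back to all cpus and remember that affinity was broken.
		 */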
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

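		/*
		 * Mask the irq while retargeting it so it cannot fire
		 * mid-update; a chip without a set_affinity method is
		 * reported once below.
		 */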
		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, affinity);
		else if (!(warned++))
			set_affinity = 0;

		if (desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}


	/*
	 * That doesn't seem sufficient: briefly re-enable interrupts
	 * for 1ms so that anything already in flight towards this cpu
	 * gets delivered before the cpu goes away.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

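/* in entry_64.S: switches to the per-cpu interrupt stack, calls __do_softirq() */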
extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

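	/* in interrupt or softirq context the pending work is handled on the way out */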
	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}