/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

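	/*
	 * THREAD_SIZE is a power of two and thread stacks are
	 * THREAD_SIZE-aligned, so masking %esp with THREAD_SIZE - 1
	 * yields the stack pointer's offset within the current
	 * thread's stack/thread_info block.  thread_info sits at the
	 * bottom of that block, so the offset minus
	 * sizeof(struct thread_info) is the space still free.
	 */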
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
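/*
 * The union mirrors the layout of a task stack: thread_info at the
 * bottom, stack growing down from the top of the same THREAD_SIZE
 * block.  The THREAD_SIZE alignment is what lets current_thread_info()
 * (which masks %esp, like the check above) keep working while we run
 * on an IRQ stack.
 */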

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

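/*
 * Run func on the stack passed in @stack: the xchgl swaps %esp with
 * the new stack pointer held in %ebx (callee-saved, so it survives the
 * call), the indirect call runs func there, and the movl restores the
 * original %esp.  Every register the callee may clobber under the
 * 32-bit calling convention is listed in the clobber list.
 */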
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __this_cpu_read(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

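	/*
	 * Same stack-switch dance as call_on_stack(), but the handler
	 * takes arguments: irq goes in %eax and desc in %edx, matching
	 * the regparm(3) calling convention the 32-bit kernel is built
	 * with.  %ebx carries the new stack pointer in and the old one
	 * back out.
	 */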
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

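	/*
	 * Unlike the hardirq context above, the softirq context's
	 * preempt_count is left at zero (from the memset): code on this
	 * stack is not in hardirq context, and __do_softirq() does its
	 * own softirq accounting against this thread_info.
	 */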
	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

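/*
 * Run pending softirqs from process context (e.g. local_bh_enable() or
 * ksoftirqd) on the per-CPU softirq stack rather than on the current
 * task's stack.  If we are already in interrupt context, softirqs will
 * be handled on irq_exit(), so bail out early.
 */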
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __this_cpu_read(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

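/*
 * Called by do_IRQ() from the vector entry path.  The handler normally
 * runs on the hardirq stack; if we were already on it (nested hardirq),
 * execute_on_irq_stack() declines and we run the handler on the current
 * stack instead.  A false return lets the caller report an unexpected
 * vector with no irq_desc behind it.
 */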
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}