/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

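/*
 * Switch the stack pointer to @stack, call @func there with no
 * arguments, then switch back to the original stack.
 */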
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

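/*
 * Run desc->handle_irq(irq, desc) on this CPU's hardirq stack.
 * Returns 0 (and does nothing) if we are already on the hardirq
 * stack, in which case the caller must invoke the handler itself.
 */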
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __this_cpu_read(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq), "1" (desc), "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

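/*
 * Process any pending softirqs on the per-CPU softirq stack
 * instead of on the current task's stack.
 */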
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __this_cpu_read(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

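/*
 * Handle one external interrupt: run the handler on the hardirq
 * stack unless we are already on it, and report whether a valid
 * irq descriptor was found.
 */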
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}