/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

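	/*
	 * Mask %esp down to its offset within the current thread
	 * stack (THREAD_SIZE is a power of two).  The stack grows
	 * down, so a small offset means little room is left above
	 * thread_info.
	 */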
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info tinfo;
	u32 stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

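/*
 * Run func() on the given stack: xchgl swaps %esp with the new stack
 * pointer while parking the old one in %ebx, the function is called
 * through %edi, and the original %esp is restored afterwards.  The
 * "=b" constraint keeps the old stack pointer live in %ebx across the
 * call; everything else the callee may touch is in the clobber list.
 */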
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl %%ebx,%%esp \n"
		     "call *%%edi \n"
		     "movl %%ebx,%%esp \n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __this_cpu_read(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
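	/*
	 * Seed the IRQ stack's thread_info: same task as the
	 * interrupted context, and previous_esp pointing back at the
	 * interrupted stack so the unwinder can walk across stacks.
	 */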
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

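	/*
	 * Open-coded variant of call_on_stack(): the handler takes
	 * (irq, desc), which the 32-bit kernel's regparm(3) calling
	 * convention expects in %eax and %edx, hence the extra
	 * input/output constraints.
	 */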
	asm volatile("xchgl %%ebx,%%esp \n"
		     "call *%%edi \n"
		     "movl %%ebx,%%esp \n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq), "1" (desc), "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

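	/*
	 * The hardirq stack's thread_info starts with HARDIRQ_OFFSET
	 * in its preempt_count, so in_interrupt() is true for any
	 * code running on it.
	 */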
	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

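	/*
	 * The softirq stack starts with a zero preempt_count; the
	 * softirq bits are raised by __do_softirq() itself once we
	 * switch onto this stack.
	 */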
	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREAD_FLAGS,
					       THREAD_ORDER));
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

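	/*
	 * Softirq processing must not nest: if we are already in hard
	 * or soft interrupt context, the pending work will be picked
	 * up when that context unwinds (e.g. from irq_exit()).
	 */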
	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __this_cpu_read(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

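	/*
	 * Try to run the handler on the per-CPU hardirq stack; if we
	 * are already on it, just call the handler in place.
	 */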
	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}