blob: 54e2b2b2e250f6f7378d3aab56e84c18e4402108 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
3 *
4 * This file contains the lowest level x86_64-specific interrupt
5 * entry and irq statistics code. All the remaining irq logic is
6 * done by the generic kernel/irq/ code and in the
7 * x86_64-specific irq controller code. (e.g. i8259.c and
8 * io_apic.c.)
9 */
10
11#include <linux/kernel_stat.h>
12#include <linux/interrupt.h>
13#include <linux/seq_file.h>
14#include <linux/module.h>
Ashok Raj76e4f662005-06-25 14:55:00 -070015#include <linux/delay.h>
Frederic Weisbeckerbcbc4f22008-12-09 23:54:20 +010016#include <linux/ftrace.h>
Jaswinder Singh Rajput5f66b2a2009-01-04 16:25:19 +053017#include <linux/uaccess.h>
18#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <asm/io_apic.h>
Andi Kleen95833c82006-01-11 22:44:36 +010020#include <asm/idle.h>
Brian Gerst3819cd42009-01-23 11:03:29 +090021#include <asm/apic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
/* Per-CPU interrupt statistics (irq counts etc.) — exported for modules. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Per-CPU pointer to the pt_regs of the interrupt currently being serviced. */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * sysctl knob: when non-zero, stack_overflow_check() escalates its
 * warning to a panic().  Presumably wired up via kernel/sysctl.c --
 * NOTE(review): confirm against the sysctl table.
 */
int sysctl_panic_on_stackoverflow;
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 *
 * Compiled out entirely unless CONFIG_DEBUG_STACKOVERFLOW is set.
 * Warns once (and optionally panics, see sysctl_panic_on_stackoverflow)
 * when the interrupted sp lies outside every known-good stack region.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	struct orig_ist *oist;
	u64 irq_stack_top, irq_stack_bottom;
	u64 estack_top, estack_bottom;
	u64 curbase = (u64)task_stack_page(current);

	/* An interrupt from user mode cannot have overflowed a kernel stack. */
	if (user_mode_vm(regs))
		return;

	/*
	 * OK if sp is inside the current task's stack, above the
	 * thread_info + pt_regs area at its base, with a 128-byte
	 * safety margin before we start complaining.
	 */
	if (regs->sp >= curbase + sizeof(struct thread_info) +
			sizeof(struct pt_regs) + 128 &&
	    regs->sp <= curbase + THREAD_SIZE)
		return;

	/* OK if sp is inside this CPU's dedicated hardirq stack. */
	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack);
	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
		return;

	/*
	 * OK if sp is inside the per-CPU IST exception stacks.  The range
	 * check treats ist[0]..ist[N_EXCEPTION_STACKS - 1] as one
	 * contiguous region -- NOTE(review): relies on the IST stacks
	 * being laid out contiguously; confirm against cpu setup code.
	 */
	oist = &__get_cpu_var(orig_ist);
	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ;
	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
		return;

	/* sp matched no known stack: report (once) where it was found. */
	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
		current->comm, curbase, regs->sp,
		irq_stack_top, irq_stack_bottom,
		estack_top, estack_bottom);

	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
Eric Sandeen4961f102006-06-26 14:00:05 +020074
Jeremy Fitzhardinge9b2b76a2009-02-06 14:09:40 -080075bool handle_irq(unsigned irq, struct pt_regs *regs)
76{
77 struct irq_desc *desc;
78
79 stack_overflow_check(regs);
80
81 desc = irq_to_desc(irq);
82 if (unlikely(!desc))
83 return false;
84
85 generic_handle_irq_desc(irq, desc);
86 return true;
87}
88
Andi Kleened6b6762005-07-28 21:15:49 -070089
90extern void call_softirq(void);
91
92asmlinkage void do_softirq(void)
93{
Jaswinder Singh Rajput5f66b2a2009-01-04 16:25:19 +053094 __u32 pending;
95 unsigned long flags;
Andi Kleened6b6762005-07-28 21:15:49 -070096
Jaswinder Singh Rajput5f66b2a2009-01-04 16:25:19 +053097 if (in_interrupt())
98 return;
Andi Kleened6b6762005-07-28 21:15:49 -070099
Jaswinder Singh Rajput5f66b2a2009-01-04 16:25:19 +0530100 local_irq_save(flags);
101 pending = local_softirq_pending();
102 /* Switch to interrupt stack */
103 if (pending) {
Andi Kleened6b6762005-07-28 21:15:49 -0700104 call_softirq();
Ingo Molnar2601e642006-07-03 00:24:45 -0700105 WARN_ON_ONCE(softirq_count());
106 }
Jaswinder Singh Rajput5f66b2a2009-01-04 16:25:19 +0530107 local_irq_restore(flags);
Andi Kleened6b6762005-07-28 21:15:49 -0700108}