blob: 0f928a131af83ab8c1fa4370bab691a7f092534d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/arch/arm/kernel/irq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6 *
Russell King8749af62005-06-25 19:39:45 +01007 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
8 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
9 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
10 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This file contains the code used by various IRQ handling routines:
16 * asking for different IRQ's should be done through these routines
17 * instead of just grabbing them. Thus setups with different IRQ numbers
18 * shouldn't result in any weird surprises, and installing new handlers
19 * should be easier.
20 *
21 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
22 * Naturally it's not a 1:1 relation, but there are similarities.
23 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/kernel_stat.h>
25#include <linux/module.h>
26#include <linux/signal.h>
27#include <linux/ioport.h>
28#include <linux/interrupt.h>
Thomas Gleixner4a2581a2006-07-01 22:30:09 +010029#include <linux/irq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/random.h>
31#include <linux/smp.h>
32#include <linux/init.h>
33#include <linux/seq_file.h>
34#include <linux/errno.h>
35#include <linux/list.h>
36#include <linux/kallsyms.h>
37#include <linux/proc_fs.h>
Rabin Vincent61b5cb12010-10-07 20:51:58 +053038#include <linux/ftrace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <asm/system.h>
Russell King8ff14432010-12-20 10:18:36 +000041#include <asm/mach/arch.h>
Russell King897d8522008-08-03 15:04:04 +010042#include <asm/mach/irq.h>
Russell King8749af62005-06-25 19:39:45 +010043#include <asm/mach/time.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 * Platforms that need a post-handling hook (see the AT91 workaround in
 * asm_do_IRQ) define irq_finish in their mach-specific irqs.h; everyone
 * else gets this no-op.  do/while(0) keeps it statement-safe in if/else.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

/* Count of spurious/unhandled interrupts, reported as "Err" in /proc/interrupts. */
unsigned long irq_err_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Thomas Gleixner25a56622011-03-24 12:02:11 +010054int arch_show_interrupts(struct seq_file *p, int prec)
Linus Torvalds1da177e2005-04-16 15:20:36 -070055{
Ben Dooksbaa28e32009-08-03 15:11:29 +010056#ifdef CONFIG_FIQ
Thomas Gleixner25a56622011-03-24 12:02:11 +010057 show_fiq_list(p, prec);
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#endif
59#ifdef CONFIG_SMP
Thomas Gleixner25a56622011-03-24 12:02:11 +010060 show_ipi_list(p, prec);
Linus Torvalds1da177e2005-04-16 15:20:36 -070061#endif
Russell Kingec405ea2010-11-15 13:38:06 +000062#ifdef CONFIG_LOCAL_TIMERS
Thomas Gleixner25a56622011-03-24 12:02:11 +010063 show_local_irqs(p, prec);
Linus Torvalds1da177e2005-04-16 15:20:36 -070064#endif
Thomas Gleixner25a56622011-03-24 12:02:11 +010065 seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -070066 return 0;
67}
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/*
70 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
71 * come via this function. Instead, they should provide their
72 * own 'handler'
73 */
Rabin Vincent61b5cb12010-10-07 20:51:58 +053074asmlinkage void __exception_irq_entry
75asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
Linus Torvalds1da177e2005-04-16 15:20:36 -070076{
Linus Torvaldse6300152006-10-06 13:11:15 -070077 struct pt_regs *old_regs = set_irq_regs(regs);
Dmitry Baryshkovd8aa0252008-10-09 13:36:24 +010078
79 irq_enter();
Linus Torvalds1da177e2005-04-16 15:20:36 -070080
81 /*
82 * Some hardware gives randomly wrong interrupts. Rather
83 * than crashing, do something sensible.
84 */
eric miao354e6f72010-06-25 09:46:09 +010085 if (unlikely(irq >= nr_irqs)) {
Aaro Koskinen7aa55142009-06-22 09:23:36 +010086 if (printk_ratelimit())
87 printk(KERN_WARNING "Bad IRQ%u\n", irq);
88 ack_bad_irq(irq);
89 } else {
Dmitry Baryshkovd8aa0252008-10-09 13:36:24 +010090 generic_handle_irq(irq);
Aaro Koskinen7aa55142009-06-22 09:23:36 +010091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
Thomas Gleixner4a2581a2006-07-01 22:30:09 +010093 /* AT91 specific workaround */
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 irq_finish(irq);
95
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 irq_exit();
Linus Torvaldse6300152006-10-06 13:11:15 -070097 set_irq_regs(old_regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -070098}
99
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100void set_irq_flags(unsigned int irq, unsigned int iflags)
101{
Thomas Gleixner1b7a2d92011-02-07 22:30:49 +0100102 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
eric miao354e6f72010-06-25 09:46:09 +0100104 if (irq >= nr_irqs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
106 return;
107 }
108
Thomas Gleixner4a2581a2006-07-01 22:30:09 +0100109 if (iflags & IRQF_VALID)
Thomas Gleixner1b7a2d92011-02-07 22:30:49 +0100110 clr |= IRQ_NOREQUEST;
Thomas Gleixner4a2581a2006-07-01 22:30:09 +0100111 if (iflags & IRQF_PROBE)
Thomas Gleixner1b7a2d92011-02-07 22:30:49 +0100112 clr |= IRQ_NOPROBE;
Thomas Gleixner4a2581a2006-07-01 22:30:09 +0100113 if (!(iflags & IRQF_NOAUTOEN))
Thomas Gleixner1b7a2d92011-02-07 22:30:49 +0100114 clr |= IRQ_NOAUTOEN;
115 /* Order is clear bits in "clr" then set bits in "set" */
116 irq_modify_status(irq, clr, set & ~clr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117}
118
119void __init init_IRQ(void)
120{
Russell King8ff14432010-12-20 10:18:36 +0000121 machine_desc->init_irq();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122}
123
#ifdef CONFIG_SPARSE_IRQ
/*
 * arch_probe_nr_irqs - tell the sparse-irq core how many IRQs to allocate
 *
 * Uses the machine descriptor's nr_irqs when the board supplies one,
 * falling back to the compile-time NR_IRQS otherwise.  Also records the
 * value in the global nr_irqs.
 */
int __init arch_probe_nr_irqs(void)
{
	if (machine_desc->nr_irqs)
		nr_irqs = machine_desc->nr_irqs;
	else
		nr_irqs = NR_IRQS;
	return nr_irqs;
}
#endif
131
#ifdef CONFIG_HOTPLUG_CPU

/*
 * migrate_one_irq - move a single IRQ off the (dying) current CPU
 * @desc: descriptor of the interrupt, already locked by the caller
 *
 * Returns true when the interrupt's affinity mask had to be broken,
 * i.e. none of the CPUs in its mask remain online and it was forced
 * onto cpu_online_mask.  Per-CPU interrupts, and interrupts whose
 * affinity does not include this CPU, are left untouched.
 *
 * Called with desc->lock held and local interrupts disabled (see
 * migrate_irqs()).
 */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/* No online CPU left in the mask: force onto any available CPU. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	/*
	 * NOTE(review): when the chip has no irq_set_affinity callback the
	 * interrupt is left where it was and d->affinity is not updated —
	 * presumably acceptable for such chips, but worth confirming.
	 */
	c = irq_data_get_irq_chip(d);
	if (c->irq_set_affinity)
		c->irq_set_affinity(d, affinity, true);
	else
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);

	return ret;
}
161
/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	/* Disable local interrupts for the whole scan of the descriptor table. */
	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken = false;

		/* Sparse IRQ tables may contain unallocated slots. */
		if (!desc)
			continue;

		/* Each descriptor is migrated under its own lock. */
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		/* Warn (ratelimited) when an IRQ had to abandon its affinity mask. */
		if (affinity_broken && printk_ratelimit())
			pr_warning("IRQ%u no longer affine to CPU%u\n", i,
			    smp_processor_id());
	}

	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */